From patchwork Thu Mar 18 18:20:37 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Bruce Richardson X-Patchwork-Id: 89502 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id C4C2FA0561; Thu, 18 Mar 2021 19:21:17 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id A4822140EAF; Thu, 18 Mar 2021 19:21:12 +0100 (CET) Received: from mga18.intel.com (mga18.intel.com [134.134.136.126]) by mails.dpdk.org (Postfix) with ESMTP id E86FC140EA0 for ; Thu, 18 Mar 2021 19:21:09 +0100 (CET) IronPort-SDR: aku7w3CEmdQ7V+bn/cCo3dY+oeSpuwjUL/XMNzMsoGM5Q/ea/1AaYXEcAmNdvRjFjNgMz3BX5+ zuuoTYQ1JEew== X-IronPort-AV: E=McAfee;i="6000,8403,9927"; a="177336603" X-IronPort-AV: E=Sophos;i="5.81,259,1610438400"; d="scan'208";a="177336603" Received: from orsmga007.jf.intel.com ([10.7.209.58]) by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 18 Mar 2021 11:21:09 -0700 IronPort-SDR: q11LtNFVW+lPpaXZmvfTF+r6rGUH+ml5ZJltzEg+9bNDSwwVUy9/rwhUsSO7jQ/Jh3qVDRBSd5 Kk17tqWGg+DQ== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.81,259,1610438400"; d="scan'208";a="411998829" Received: from silpixa00399126.ir.intel.com ([10.237.223.184]) by orsmga007.jf.intel.com with ESMTP; 18 Mar 2021 11:21:08 -0700 From: Bruce Richardson To: dev@dpdk.org Cc: Bruce Richardson Date: Thu, 18 Mar 2021 18:20:37 +0000 Message-Id: <20210318182042.43658-2-bruce.richardson@intel.com> X-Mailer: git-send-email 2.27.0 In-Reply-To: <20210318182042.43658-1-bruce.richardson@intel.com> References: <20210318182042.43658-1-bruce.richardson@intel.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v1 1/6] raw/ioat: support limiting queues for idxd PCI device X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" When using a full device instance via vfio, allow the user to specify a maximum number of queues to configure rather than always using the max number of supported queues. 
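For illustration, the new parameter is consumed straight from the device's devargs string, e.g. an EAL allow-list entry such as "-a <BDF>,max_queues=4" (the PCI address part is only an example). A minimal, self-contained sketch of the parsing behaviour added here, with the driver plumbing stripped away:

#include <stdio.h>

/* Hypothetical helper mirroring the patch: NULL or "" means no devargs,
 * max_queues == 0 means "do not limit", anything unparseable is an error. */
static int
parse_max_queues(const char *devargs, unsigned int *max_queues)
{
	*max_queues = 0;
	if (devargs == NULL || devargs[0] == '\0')
		return 0;
	if (sscanf(devargs, "max_queues=%u", max_queues) != 1)
		return -1;
	return 0;
}

int
main(void)
{
	unsigned int max_queues;

	if (parse_max_queues("max_queues=4", &max_queues) == 0)
		printf("limiting device to %u queues\n", max_queues);
	return 0;
}

As the in-code comment in the patch notes, a single sscanf() is only acceptable while there is exactly one parameter; once more devargs are added the intent is to switch to rte_kvargs.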
Signed-off-by: Bruce Richardson --- drivers/raw/ioat/idxd_pci.c | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/drivers/raw/ioat/idxd_pci.c b/drivers/raw/ioat/idxd_pci.c index 01623f33f6..b48e565b4c 100644 --- a/drivers/raw/ioat/idxd_pci.c +++ b/drivers/raw/ioat/idxd_pci.c @@ -4,6 +4,7 @@ #include #include +#include #include "ioat_private.h" #include "ioat_spec.h" @@ -123,7 +124,8 @@ static const struct rte_rawdev_ops idxd_pci_ops = { #define IDXD_PORTAL_SIZE (4096 * 4) static int -init_pci_device(struct rte_pci_device *dev, struct idxd_rawdev *idxd) +init_pci_device(struct rte_pci_device *dev, struct idxd_rawdev *idxd, + unsigned int max_queues) { struct idxd_pci_common *pci; uint8_t nb_groups, nb_engines, nb_wqs; @@ -179,6 +181,16 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_rawdev *idxd) for (i = 0; i < nb_wqs; i++) idxd_get_wq_cfg(pci, i)[0] = 0; + /* limit queues if necessary */ + if (max_queues != 0 && nb_wqs > max_queues) { + nb_wqs = max_queues; + if (nb_engines > max_queues) + nb_engines = max_queues; + if (nb_groups > max_queues) + nb_groups = max_queues; + IOAT_PMD_DEBUG("Limiting queues to %u", nb_wqs); + } + /* put each engine into a separate group to avoid reordering */ if (nb_groups > nb_engines) nb_groups = nb_engines; @@ -242,12 +254,23 @@ idxd_rawdev_probe_pci(struct rte_pci_driver *drv, struct rte_pci_device *dev) uint8_t nb_wqs; int qid, ret = 0; char name[PCI_PRI_STR_SIZE]; + unsigned int max_queues = 0; rte_pci_device_name(&dev->addr, name, sizeof(name)); IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node); dev->device.driver = &drv->driver; - ret = init_pci_device(dev, &idxd); + if (dev->device.devargs && dev->device.devargs->args[0] != '\0') { + /* if the number of devargs grows beyond just 1, use rte_kvargs */ + if (sscanf(dev->device.devargs->args, + "max_queues=%u", &max_queues) != 1) { + IOAT_PMD_ERR("Invalid device parameter: '%s'", + dev->device.devargs->args); + return -1; + } + } + + ret = init_pci_device(dev, &idxd, max_queues); if (ret < 0) { IOAT_PMD_ERR("Error initializing PCI hardware"); return ret; @@ -353,3 +376,4 @@ RTE_PMD_REGISTER_PCI(IDXD_PMD_RAWDEV_NAME_PCI, idxd_pmd_drv_pci); RTE_PMD_REGISTER_PCI_TABLE(IDXD_PMD_RAWDEV_NAME_PCI, pci_id_idxd_map); RTE_PMD_REGISTER_KMOD_DEP(IDXD_PMD_RAWDEV_NAME_PCI, "* igb_uio | uio_pci_generic | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(rawdev_idxd_pci, "max_queues=0"); From patchwork Thu Mar 18 18:20:38 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Bruce Richardson X-Patchwork-Id: 89503 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5B730A0561; Thu, 18 Mar 2021 19:21:23 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id E9AF9140EB7; Thu, 18 Mar 2021 19:21:15 +0100 (CET) Received: from mga18.intel.com (mga18.intel.com [134.134.136.126]) by mails.dpdk.org (Postfix) with ESMTP id 24DBA140EB4 for ; Thu, 18 Mar 2021 19:21:13 +0100 (CET) IronPort-SDR: LZ0THkLAMmpZUka1uLUXKqnU9SKWMNzMqei1EzZXPVAkvPESBNjnuJq7gY79/nrPzoXCA0n48c WXx/O3uiDeBw== X-IronPort-AV: E=McAfee;i="6000,8403,9927"; a="177336620" X-IronPort-AV: E=Sophos;i="5.81,259,1610438400"; d="scan'208";a="177336620" Received: from orsmga007.jf.intel.com
([10.7.209.58]) by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 18 Mar 2021 11:21:13 -0700 IronPort-SDR: qP8SQ2yGIrz7VUx00AIAKwYW5mbYIVU7WisGH56T5r4SmLtEUs6vloiV8AX53uMeYk54mBKqZw Hbhc2Aj2PFjw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.81,259,1610438400"; d="scan'208";a="411998843" Received: from silpixa00399126.ir.intel.com ([10.237.223.184]) by orsmga007.jf.intel.com with ESMTP; 18 Mar 2021 11:21:12 -0700 From: Bruce Richardson To: dev@dpdk.org Cc: Bruce Richardson Date: Thu, 18 Mar 2021 18:20:38 +0000 Message-Id: <20210318182042.43658-3-bruce.richardson@intel.com> X-Mailer: git-send-email 2.27.0 In-Reply-To: <20210318182042.43658-1-bruce.richardson@intel.com> References: <20210318182042.43658-1-bruce.richardson@intel.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v1 2/6] raw/ioat: add component prefix to log messages X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add the driver prefix "IOAT" to log messages for the driver. Signed-off-by: Bruce Richardson --- drivers/raw/ioat/ioat_private.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/raw/ioat/ioat_private.h b/drivers/raw/ioat/ioat_private.h index 6c423811ec..f032d5fe3d 100644 --- a/drivers/raw/ioat/ioat_private.h +++ b/drivers/raw/ioat/ioat_private.h @@ -21,7 +21,7 @@ extern int ioat_pmd_logtype; #define IOAT_PMD_LOG(level, fmt, args...) rte_log(RTE_LOG_ ## level, \ - ioat_pmd_logtype, "%s(): " fmt "\n", __func__, ##args) + ioat_pmd_logtype, "IOAT: %s(): " fmt "\n", __func__, ##args) #define IOAT_PMD_DEBUG(fmt, args...) IOAT_PMD_LOG(DEBUG, fmt, ## args) #define IOAT_PMD_INFO(fmt, args...) 
IOAT_PMD_LOG(INFO, fmt, ## args) From patchwork Thu Mar 18 18:20:39 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Bruce Richardson X-Patchwork-Id: 89504 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id E6E4DA0561; Thu, 18 Mar 2021 19:21:28 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 3FEA5140ECA; Thu, 18 Mar 2021 19:21:21 +0100 (CET) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by mails.dpdk.org (Postfix) with ESMTP id EDDD3140EC9 for ; Thu, 18 Mar 2021 19:21:19 +0100 (CET) IronPort-SDR: iW0Io5XzdbCXQufWNlrbsPPVxhxYTlfsFi/x4JgW5XYphlJ+nyygMK01VvEdzyJER/GTCrldb/ pp2u98Fz5gDg== X-IronPort-AV: E=McAfee;i="6000,8403,9927"; a="187386505" X-IronPort-AV: E=Sophos;i="5.81,259,1610438400"; d="scan'208";a="187386505" Received: from orsmga007.jf.intel.com ([10.7.209.58]) by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 18 Mar 2021 11:21:19 -0700 IronPort-SDR: qY2FdVZF0d34kfEqnuYwAcA0QzGXRxUQfXIGT0chulk0YXsFeXLHo3tz+OKBotOWeOFMFHxF9n SZWUJOoHDH6g== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.81,259,1610438400"; d="scan'208";a="411998863" Received: from silpixa00399126.ir.intel.com ([10.237.223.184]) by orsmga007.jf.intel.com with ESMTP; 18 Mar 2021 11:21:17 -0700 From: Bruce Richardson To: dev@dpdk.org Cc: Bruce Richardson Date: Thu, 18 Mar 2021 18:20:39 +0000 Message-Id: <20210318182042.43658-4-bruce.richardson@intel.com> X-Mailer: git-send-email 2.27.0 In-Reply-To: <20210318182042.43658-1-bruce.richardson@intel.com> References: <20210318182042.43658-1-bruce.richardson@intel.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v1 3/6] raw/ioat: add explicit padding to descriptor struct X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add an explicit padding field to the end of the descriptor structure so that when the batch descriptor is defined on the stack for perform-ops, the unused space is all zeroed appropriately. 
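The underlying C detail is worth spelling out: when a struct is built on the stack with a designated initializer, named members that are not listed are zero-initialised, but unnamed padding bytes are not guaranteed to be. Turning the trailing pad into named fields therefore makes the full 64 bytes well defined. A standalone sketch of the idea (field names taken from the patch, rte_iova_t approximated by uint64_t, the op value purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the hardware descriptor described in the patch. */
struct hw_desc {
	uint32_t pasid;
	uint32_t op_flags;
	uint64_t completion;
	uint64_t src;
	uint64_t dst;
	uint32_t size;
	uint16_t intr_handle;
	uint16_t reserved[13];	/* named trailing pad, zeroed by the initializer */
} __attribute__((aligned(64)));

int
main(void)
{
	/* Every named member not listed below is zero-initialised, so the
	 * reserved tail is guaranteed to be 0; unnamed padding bytes would
	 * instead carry unspecified stack contents. */
	struct hw_desc batch_desc = {
		.op_flags = 1u << 24,	/* illustrative op code only */
		.size = 8,
	};

	printf("reserved[0] = %u (guaranteed zero)\n",
			(unsigned int)batch_desc.reserved[0]);
	return 0;
}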
Signed-off-by: Bruce Richardson --- drivers/raw/ioat/rte_ioat_rawdev_fns.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/raw/ioat/rte_ioat_rawdev_fns.h b/drivers/raw/ioat/rte_ioat_rawdev_fns.h index c2c4601ca7..e96edc9053 100644 --- a/drivers/raw/ioat/rte_ioat_rawdev_fns.h +++ b/drivers/raw/ioat/rte_ioat_rawdev_fns.h @@ -140,7 +140,10 @@ struct rte_idxd_hw_desc { uint32_t size; /* length of data for op, or batch size */ - /* 28 bytes of padding here */ + uint16_t intr_handle; /* completion interrupt handle */ + + /* remaining 26 bytes are reserved */ + uint16_t __reserved[13]; } __rte_aligned(64); /** From patchwork Thu Mar 18 18:20:40 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Bruce Richardson X-Patchwork-Id: 89505 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D2261A0561; Thu, 18 Mar 2021 19:21:34 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 63314140EDE; Thu, 18 Mar 2021 19:21:31 +0100 (CET) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 078C2140EB3 for ; Thu, 18 Mar 2021 19:21:28 +0100 (CET) IronPort-SDR: ecjtxULThD7o4JTW5COjQxpsKAtSAYhU4kTNkllJryelgsFdx2ZJxpRmGguBhH0gS8phkuEnU0 brKPPCPoq5tA== X-IronPort-AV: E=McAfee;i="6000,8403,9927"; a="186391653" X-IronPort-AV: E=Sophos;i="5.81,259,1610438400"; d="scan'208";a="186391653" Received: from orsmga007.jf.intel.com ([10.7.209.58]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 18 Mar 2021 11:21:27 -0700 IronPort-SDR: Vjbonvl75fDotSOTiHcKzqk9dIhxxwE7RQykyjYAC8iVuCy0OXCgnOKKCD1cD5v3Z7jI6/9KaN atZik7WqUZ+g== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.81,259,1610438400"; d="scan'208";a="411998898" Received: from silpixa00399126.ir.intel.com ([10.237.223.184]) by orsmga007.jf.intel.com with ESMTP; 18 Mar 2021 11:21:26 -0700 From: Bruce Richardson To: dev@dpdk.org Cc: Bruce Richardson Date: Thu, 18 Mar 2021 18:20:40 +0000 Message-Id: <20210318182042.43658-5-bruce.richardson@intel.com> X-Mailer: git-send-email 2.27.0 In-Reply-To: <20210318182042.43658-1-bruce.richardson@intel.com> References: <20210318182042.43658-1-bruce.richardson@intel.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v1 4/6] raw/ioat: rework SW ring layout X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" The ring management in the idxd part of the driver is more complex than it needs to be, tracking individual batches in a ring and having null descriptors as padding to avoid having single-operation batches. This can be simplified by using a regular ring-based layout, with additional overflow at the end to ensure that the one does not need to wrap within a batch. 
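In other words, the descriptor ring remains a power-of-two ring addressed through a mask, but the memory behind it is allocated at twice the ring size so that the descriptors of any one batch are always contiguous: a batch that begins near the end of the ring spills into the spare second half rather than wrapping to slot zero. A stripped-down sketch of that index handling (bookkeeping names follow the patch; free-space checks and the handle ring are omitted):

#include <stdint.h>
#include <stdlib.h>

struct desc { uint64_t words[8]; };		/* 64-byte placeholder */

struct ring {
	uint16_t desc_ring_mask;	/* ring_size - 1 */
	uint16_t batch_start;		/* masked index of current batch start */
	uint16_t batch_size;
	struct desc *desc_ring;		/* 2 * ring_size entries allocated */
};

static int
ring_init(struct ring *r, uint16_t ring_size)	/* ring_size: power of 2 */
{
	r->desc_ring = calloc(2 * ring_size, sizeof(*r->desc_ring));
	if (r->desc_ring == NULL)
		return -1;
	r->desc_ring_mask = ring_size - 1;
	r->batch_start = 0;
	r->batch_size = 0;
	return 0;
}

/* Unmasked write position: it may point into the overflow half while a
 * batch is open, which is exactly what keeps the batch contiguous. */
static struct desc *
ring_next_desc(struct ring *r)
{
	return &r->desc_ring[r->batch_start + r->batch_size++];
}

/* On submit, the start pointer is wrapped back into the first half. */
static void
ring_submit_batch(struct ring *r)
{
	r->batch_start = (r->batch_start + r->batch_size) & r->desc_ring_mask;
	r->batch_size = 0;
}

int
main(void)
{
	struct ring r;

	if (ring_init(&r, 8) != 0)
		return 1;
	/* a 3-descriptor batch starting at slot 6 occupies slots 6,7,8,
	 * running into the overflow area instead of wrapping to slot 0 */
	r.batch_start = 6;
	ring_next_desc(&r);
	ring_next_desc(&r);
	ring_next_desc(&r);
	ring_submit_batch(&r);	/* batch_start becomes (6+3) & 7 == 1 */
	free(r.desc_ring);
	return 0;
}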
Signed-off-by: Bruce Richardson --- drivers/raw/ioat/idxd_pci.c | 5 +- drivers/raw/ioat/idxd_vdev.c | 3 +- drivers/raw/ioat/ioat_common.c | 99 +++++------ drivers/raw/ioat/ioat_rawdev_test.c | 1 + drivers/raw/ioat/rte_ioat_rawdev_fns.h | 229 +++++++++++++------------ 5 files changed, 179 insertions(+), 158 deletions(-) diff --git a/drivers/raw/ioat/idxd_pci.c b/drivers/raw/ioat/idxd_pci.c index b48e565b4c..13515dbc6c 100644 --- a/drivers/raw/ioat/idxd_pci.c +++ b/drivers/raw/ioat/idxd_pci.c @@ -90,7 +90,7 @@ idxd_pci_dev_start(struct rte_rawdev *dev) return 0; } - if (idxd->public.batch_ring == NULL) { + if (idxd->public.desc_ring == NULL) { IOAT_PMD_ERR("WQ %d has not been fully configured", idxd->qid); return -EINVAL; } @@ -337,7 +337,8 @@ idxd_rawdev_destroy(const char *name) /* free device memory */ IOAT_PMD_DEBUG("Freeing device driver memory"); rdev->dev_private = NULL; - rte_free(idxd->public.batch_ring); + rte_free(idxd->public.batch_idx_ring); + rte_free(idxd->public.desc_ring); rte_free(idxd->public.hdl_ring); rte_memzone_free(idxd->mz); diff --git a/drivers/raw/ioat/idxd_vdev.c b/drivers/raw/ioat/idxd_vdev.c index 30a53b3b82..af585053b4 100644 --- a/drivers/raw/ioat/idxd_vdev.c +++ b/drivers/raw/ioat/idxd_vdev.c @@ -209,7 +209,8 @@ idxd_rawdev_remove_vdev(struct rte_vdev_device *vdev) ret = -errno; } - rte_free(idxd->public.batch_ring); + rte_free(idxd->public.batch_idx_ring); + rte_free(idxd->public.desc_ring); rte_free(idxd->public.hdl_ring); rte_memzone_free(idxd->mz); diff --git a/drivers/raw/ioat/ioat_common.c b/drivers/raw/ioat/ioat_common.c index d055c36a2a..fcb30572e6 100644 --- a/drivers/raw/ioat/ioat_common.c +++ b/drivers/raw/ioat/ioat_common.c @@ -84,21 +84,21 @@ idxd_dev_dump(struct rte_rawdev *dev, FILE *f) fprintf(f, "Driver: %s\n\n", dev->driver_name); fprintf(f, "Portal: %p\n", rte_idxd->portal); - fprintf(f, "Batch Ring size: %u\n", rte_idxd->batch_ring_sz); - fprintf(f, "Comp Handle Ring size: %u\n\n", rte_idxd->hdl_ring_sz); - - fprintf(f, "Next batch: %u\n", rte_idxd->next_batch); - fprintf(f, "Next batch to be completed: %u\n", rte_idxd->next_completed); - for (i = 0; i < rte_idxd->batch_ring_sz; i++) { - struct rte_idxd_desc_batch *b = &rte_idxd->batch_ring[i]; - fprintf(f, "Batch %u @%p: submitted=%u, op_count=%u, hdl_end=%u\n", - i, b, b->submitted, b->op_count, b->hdl_end); - } - - fprintf(f, "\n"); - fprintf(f, "Next free hdl: %u\n", rte_idxd->next_free_hdl); - fprintf(f, "Last completed hdl: %u\n", rte_idxd->last_completed_hdl); - fprintf(f, "Next returned hdl: %u\n", rte_idxd->next_ret_hdl); + fprintf(f, "Config: {ring_size: %u, hdls_disable: %u}\n\n", + rte_idxd->cfg.ring_size, rte_idxd->cfg.hdls_disable); + + fprintf(f, "max batches: %u\n", rte_idxd->max_batches); + fprintf(f, "batch idx read: %u\n", rte_idxd->batch_idx_read); + fprintf(f, "batch idx write: %u\n", rte_idxd->batch_idx_write); + fprintf(f, "batch idxes:"); + for (i = 0; i < rte_idxd->max_batches + 1; i++) + fprintf(f, "%u ", rte_idxd->batch_idx_ring[i]); + fprintf(f, "\n\n"); + + fprintf(f, "hdls read: %u\n", rte_idxd->max_batches); + fprintf(f, "hdls avail: %u\n", rte_idxd->hdls_avail); + fprintf(f, "batch start: %u\n", rte_idxd->batch_start); + fprintf(f, "batch size: %u\n", rte_idxd->batch_size); return 0; } @@ -114,10 +114,8 @@ idxd_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info, if (info_size != sizeof(*cfg)) return -EINVAL; - if (cfg != NULL) { - cfg->ring_size = rte_idxd->hdl_ring_sz; - cfg->hdls_disable = rte_idxd->hdls_disable; - } + if (cfg != NULL) + *cfg = 
rte_idxd->cfg; return 0; } @@ -129,8 +127,6 @@ idxd_dev_configure(const struct rte_rawdev *dev, struct rte_idxd_rawdev *rte_idxd = &idxd->public; struct rte_ioat_rawdev_config *cfg = config; uint16_t max_desc = cfg->ring_size; - uint16_t max_batches = max_desc / BATCH_SIZE; - uint16_t i; if (config_size != sizeof(*cfg)) return -EINVAL; @@ -140,47 +136,34 @@ idxd_dev_configure(const struct rte_rawdev *dev, return -EAGAIN; } - rte_idxd->hdls_disable = cfg->hdls_disable; + rte_idxd->cfg = *cfg; - /* limit the batches to what can be stored in hardware */ - if (max_batches > idxd->max_batches) { - IOAT_PMD_DEBUG("Ring size of %u is too large for this device, need to limit to %u batches of %u", - max_desc, idxd->max_batches, BATCH_SIZE); - max_batches = idxd->max_batches; - max_desc = max_batches * BATCH_SIZE; - } if (!rte_is_power_of_2(max_desc)) max_desc = rte_align32pow2(max_desc); - IOAT_PMD_DEBUG("Rawdev %u using %u descriptors in %u batches", - dev->dev_id, max_desc, max_batches); + IOAT_PMD_DEBUG("Rawdev %u using %u descriptors", + dev->dev_id, max_desc); + rte_idxd->desc_ring_mask = max_desc - 1; /* in case we are reconfiguring a device, free any existing memory */ - rte_free(rte_idxd->batch_ring); + rte_free(rte_idxd->desc_ring); rte_free(rte_idxd->hdl_ring); - rte_idxd->batch_ring = rte_zmalloc(NULL, - sizeof(*rte_idxd->batch_ring) * max_batches, 0); - if (rte_idxd->batch_ring == NULL) + /* allocate the descriptor ring at 2x size as batches can't wrap */ + rte_idxd->desc_ring = rte_zmalloc(NULL, + sizeof(*rte_idxd->desc_ring) * max_desc * 2, 0); + if (rte_idxd->desc_ring == NULL) return -ENOMEM; + rte_idxd->desc_iova = rte_mem_virt2iova(rte_idxd->desc_ring); rte_idxd->hdl_ring = rte_zmalloc(NULL, sizeof(*rte_idxd->hdl_ring) * max_desc, 0); if (rte_idxd->hdl_ring == NULL) { - rte_free(rte_idxd->batch_ring); - rte_idxd->batch_ring = NULL; + rte_free(rte_idxd->desc_ring); + rte_idxd->desc_ring = NULL; return -ENOMEM; } - rte_idxd->batch_ring_sz = max_batches; - rte_idxd->hdl_ring_sz = max_desc; - - for (i = 0; i < rte_idxd->batch_ring_sz; i++) { - struct rte_idxd_desc_batch *b = &rte_idxd->batch_ring[i]; - b->batch_desc.completion = rte_mem_virt2iova(&b->comp); - b->batch_desc.desc_addr = rte_mem_virt2iova(&b->null_desc); - b->batch_desc.op_flags = (idxd_op_batch << IDXD_CMD_OP_SHIFT) | - IDXD_FLAG_COMPLETION_ADDR_VALID | - IDXD_FLAG_REQUEST_COMPLETION; - } + rte_idxd->hdls_read = rte_idxd->batch_start = 0; + rte_idxd->batch_size = 0; return 0; } @@ -191,6 +174,7 @@ idxd_rawdev_create(const char *name, struct rte_device *dev, const struct rte_rawdev_ops *ops) { struct idxd_rawdev *idxd; + struct rte_idxd_rawdev *public; struct rte_rawdev *rawdev = NULL; const struct rte_memzone *mz = NULL; char mz_name[RTE_MEMZONE_NAMESIZE]; @@ -245,13 +229,30 @@ idxd_rawdev_create(const char *name, struct rte_device *dev, idxd = rawdev->dev_private; *idxd = *base_idxd; /* copy over the main fields already passed in */ - idxd->public.type = RTE_IDXD_DEV; idxd->rawdev = rawdev; idxd->mz = mz; + public = &idxd->public; + public->type = RTE_IDXD_DEV; + public->max_batches = idxd->max_batches; + public->batch_idx_read = 0; + public->batch_idx_write = 0; + /* allocate batch index ring. The +1 is because we can never fully use + * the ring, otherwise read == write means both full and empty. 
+ */ + public->batch_idx_ring = rte_zmalloc(NULL, + sizeof(uint16_t) * (idxd->max_batches + 1), 0); + if (public->batch_idx_ring == NULL) { + IOAT_PMD_ERR("Unable to reserve memory for batch data\n"); + ret = -ENOMEM; + goto cleanup; + } + return 0; cleanup: + if (mz) + rte_memzone_free(mz); if (rawdev) rte_rawdev_pmd_release(rawdev); diff --git a/drivers/raw/ioat/ioat_rawdev_test.c b/drivers/raw/ioat/ioat_rawdev_test.c index 101f24a677..3de8273704 100644 --- a/drivers/raw/ioat/ioat_rawdev_test.c +++ b/drivers/raw/ioat/ioat_rawdev_test.c @@ -124,6 +124,7 @@ test_enqueue_copies(int dev_id) if (rte_ioat_completed_ops(dev_id, 64, (void *)completed_src, (void *)completed_dst) != RTE_DIM(srcs)) { PRINT_ERR("Error with rte_ioat_completed_ops\n"); + rte_rawdev_dump(dev_id, stdout); return -1; } for (i = 0; i < RTE_DIM(srcs); i++) { diff --git a/drivers/raw/ioat/rte_ioat_rawdev_fns.h b/drivers/raw/ioat/rte_ioat_rawdev_fns.h index e96edc9053..231aa72612 100644 --- a/drivers/raw/ioat/rte_ioat_rawdev_fns.h +++ b/drivers/raw/ioat/rte_ioat_rawdev_fns.h @@ -5,6 +5,7 @@ #define _RTE_IOAT_RAWDEV_FNS_H_ #include +#include #include #include #include @@ -159,26 +160,6 @@ struct rte_idxd_completion { uint32_t invalid_flags; } __rte_aligned(32); -#define BATCH_SIZE 64 - -/** - * Structure used inside the driver for building up and submitting - * a batch of operations to the DSA hardware. - */ -struct rte_idxd_desc_batch { - struct rte_idxd_completion comp; /* the completion record for batch */ - - uint16_t submitted; - uint16_t op_count; - uint16_t hdl_end; - - struct rte_idxd_hw_desc batch_desc; - - /* batches must always have 2 descriptors, so put a null at the start */ - struct rte_idxd_hw_desc null_desc; - struct rte_idxd_hw_desc ops[BATCH_SIZE]; -}; - /** * structure used to save the "handles" provided by the user to be * returned to the user on job completion. 
@@ -198,19 +179,24 @@ struct rte_idxd_rawdev { void *portal; /* address to write the batch descriptor */ - /* counters to track the batches and the individual op handles */ - uint16_t batch_ring_sz; /* size of batch ring */ - uint16_t hdl_ring_sz; /* size of the user hdl ring */ + struct rte_ioat_rawdev_config cfg; + rte_iova_t desc_iova; /* base address of desc ring, needed for completions */ + + /* counters to track the batches */ + unsigned short max_batches; + unsigned short batch_idx_read; + unsigned short batch_idx_write; + unsigned short *batch_idx_ring; /* store where each batch ends */ - uint16_t next_batch; /* where we write descriptor ops */ - uint16_t next_completed; /* batch where we read completions */ - uint16_t next_ret_hdl; /* the next user hdl to return */ - uint16_t last_completed_hdl; /* the last user hdl that has completed */ - uint16_t next_free_hdl; /* where the handle for next op will go */ - uint16_t hdls_disable; /* disable tracking completion handles */ + /* track descriptors and handles */ + unsigned short desc_ring_mask; + unsigned short hdls_avail; /* handles for ops completed */ + unsigned short hdls_read; /* the read pointer for hdls/desc rings */ + unsigned short batch_start; /* start+size == write pointer for hdls/desc */ + unsigned short batch_size; + struct rte_idxd_hw_desc *desc_ring; struct rte_idxd_user_hdl *hdl_ring; - struct rte_idxd_desc_batch *batch_ring; }; static __rte_always_inline int @@ -372,35 +358,38 @@ __ioat_completed_ops(int dev_id, uint8_t max_copies, } static __rte_always_inline int -__idxd_write_desc(int dev_id, const struct rte_idxd_hw_desc *desc, +__idxd_write_desc(int dev_id, + const uint32_t op_flags, + const rte_iova_t src, + const rte_iova_t dst, + const uint32_t size, const struct rte_idxd_user_hdl *hdl) { struct rte_idxd_rawdev *idxd = (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private; - struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch]; + uint16_t write_idx = idxd->batch_start + idxd->batch_size; - /* check for room in the handle ring */ - if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl) + /* first check batch ring space then desc ring space */ + if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) || + idxd->batch_idx_write + 1 == idxd->batch_idx_read) goto failed; - - /* check for space in current batch */ - if (b->op_count >= BATCH_SIZE) + if (((write_idx + 1) & idxd->desc_ring_mask) == idxd->hdls_read) goto failed; - /* check that we can actually use the current batch */ - if (b->submitted) - goto failed; + /* write desc and handle. 
Note, descriptors don't wrap */ + idxd->desc_ring[write_idx].pasid = 0; + idxd->desc_ring[write_idx].op_flags = op_flags; + idxd->desc_ring[write_idx].completion = 0; + idxd->desc_ring[write_idx].src = src; + idxd->desc_ring[write_idx].dst = dst; + idxd->desc_ring[write_idx].size = size; - /* write the descriptor */ - b->ops[b->op_count++] = *desc; - - /* store the completion details */ - if (!idxd->hdls_disable) - idxd->hdl_ring[idxd->next_free_hdl] = *hdl; - if (++idxd->next_free_hdl == idxd->hdl_ring_sz) - idxd->next_free_hdl = 0; + idxd->hdl_ring[write_idx & idxd->desc_ring_mask] = *hdl; + idxd->batch_size++; idxd->xstats.enqueued++; + + rte_prefetch0_write(&idxd->desc_ring[write_idx + 1]); return 1; failed: @@ -413,53 +402,48 @@ static __rte_always_inline int __idxd_enqueue_fill(int dev_id, uint64_t pattern, rte_iova_t dst, unsigned int length, uintptr_t dst_hdl) { - const struct rte_idxd_hw_desc desc = { - .op_flags = (idxd_op_fill << IDXD_CMD_OP_SHIFT) | - IDXD_FLAG_CACHE_CONTROL, - .src = pattern, - .dst = dst, - .size = length - }; const struct rte_idxd_user_hdl hdl = { .dst = dst_hdl }; - return __idxd_write_desc(dev_id, &desc, &hdl); + return __idxd_write_desc(dev_id, + (idxd_op_fill << IDXD_CMD_OP_SHIFT) | IDXD_FLAG_CACHE_CONTROL, + pattern, dst, length, &hdl); } static __rte_always_inline int __idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst, unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl) { - const struct rte_idxd_hw_desc desc = { - .op_flags = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) | - IDXD_FLAG_CACHE_CONTROL, - .src = src, - .dst = dst, - .size = length - }; const struct rte_idxd_user_hdl hdl = { .src = src_hdl, .dst = dst_hdl }; - return __idxd_write_desc(dev_id, &desc, &hdl); + return __idxd_write_desc(dev_id, + (idxd_op_memmove << IDXD_CMD_OP_SHIFT) | IDXD_FLAG_CACHE_CONTROL, + src, dst, length, &hdl); } static __rte_always_inline int __idxd_fence(int dev_id) { - static const struct rte_idxd_hw_desc fence = { - .op_flags = IDXD_FLAG_FENCE - }; static const struct rte_idxd_user_hdl null_hdl; - return __idxd_write_desc(dev_id, &fence, &null_hdl); + /* only op field needs filling - zero src, dst and length */ + return __idxd_write_desc(dev_id, IDXD_FLAG_FENCE, 0, 0, 0, &null_hdl); } static __rte_always_inline void -__idxd_movdir64b(volatile void *dst, const void *src) +__idxd_movdir64b(volatile void *dst, const struct rte_idxd_hw_desc *src) { asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02" : - : "a" (dst), "d" (src)); + : "a" (dst), "d" (src) + : "memory"); +} + +static __rte_always_inline rte_iova_t +__desc_idx_to_iova(struct rte_idxd_rawdev *idxd, uint16_t n) +{ + return idxd->desc_iova + (n * sizeof(struct rte_idxd_hw_desc)); } static __rte_always_inline void @@ -467,19 +451,48 @@ __idxd_perform_ops(int dev_id) { struct rte_idxd_rawdev *idxd = (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private; - struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch]; + /* write completion to last desc in the batch */ + uint16_t comp_idx = idxd->batch_start + idxd->batch_size - 1; + if (comp_idx > idxd->desc_ring_mask) { + comp_idx &= idxd->desc_ring_mask; + *((uint64_t *)&idxd->desc_ring[comp_idx]) = 0; /* zero start of desc */ + } - if (b->submitted || b->op_count == 0) + if (idxd->batch_size == 0) return; - b->hdl_end = idxd->next_free_hdl; - b->comp.status = 0; - b->submitted = 1; - b->batch_desc.size = b->op_count + 1; - __idxd_movdir64b(idxd->portal, &b->batch_desc); - - if (++idxd->next_batch == idxd->batch_ring_sz) - idxd->next_batch 
= 0; - idxd->xstats.started = idxd->xstats.enqueued; + + _mm_sfence(); /* fence before writing desc to device */ + if (idxd->batch_size > 1) { + struct rte_idxd_hw_desc batch_desc = { + .op_flags = (idxd_op_batch << IDXD_CMD_OP_SHIFT) | + IDXD_FLAG_COMPLETION_ADDR_VALID | + IDXD_FLAG_REQUEST_COMPLETION, + .desc_addr = __desc_idx_to_iova(idxd, idxd->batch_start), + .completion = __desc_idx_to_iova(idxd, comp_idx), + .size = idxd->batch_size, + }; + + __idxd_movdir64b(idxd->portal, &batch_desc); + } else { + /* special case batch size of 1, as not allowed by HW */ + /* comp_idx == batch_start */ + struct rte_idxd_hw_desc *desc = &idxd->desc_ring[comp_idx]; + desc->op_flags |= IDXD_FLAG_COMPLETION_ADDR_VALID | + IDXD_FLAG_REQUEST_COMPLETION; + desc->completion = __desc_idx_to_iova(idxd, comp_idx); + + __idxd_movdir64b(idxd->portal, desc); + } + + idxd->xstats.started += idxd->batch_size; + + idxd->batch_start += idxd->batch_size; + idxd->batch_start &= idxd->desc_ring_mask; + idxd->batch_size = 0; + + idxd->batch_idx_ring[idxd->batch_idx_write++] = comp_idx; + if (idxd->batch_idx_write > idxd->max_batches) + idxd->batch_idx_write = 0; } static __rte_always_inline int @@ -488,35 +501,39 @@ __idxd_completed_ops(int dev_id, uint8_t max_ops, { struct rte_idxd_rawdev *idxd = (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private; - struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_completed]; - uint16_t h_idx = idxd->next_ret_hdl; - int n = 0; - - while (b->submitted && b->comp.status != 0) { - idxd->last_completed_hdl = b->hdl_end; - b->submitted = 0; - b->op_count = 0; - if (++idxd->next_completed == idxd->batch_ring_sz) - idxd->next_completed = 0; - b = &idxd->batch_ring[idxd->next_completed]; + unsigned short n, h_idx; + + while (idxd->batch_idx_read != idxd->batch_idx_write) { + uint16_t idx_to_chk = idxd->batch_idx_ring[idxd->batch_idx_read]; + volatile struct rte_idxd_completion *comp_to_chk = + (struct rte_idxd_completion *)&idxd->desc_ring[idx_to_chk]; + if (comp_to_chk->status == 0) + break; + /* avail points to one after the last one written */ + idxd->hdls_avail = (idx_to_chk + 1) & idxd->desc_ring_mask; + idxd->batch_idx_read ++; + if (idxd->batch_idx_read > idxd->max_batches) + idxd->batch_idx_read = 0; } - if (!idxd->hdls_disable) - for (n = 0; n < max_ops && h_idx != idxd->last_completed_hdl; n++) { - src_hdls[n] = idxd->hdl_ring[h_idx].src; - dst_hdls[n] = idxd->hdl_ring[h_idx].dst; - if (++h_idx == idxd->hdl_ring_sz) - h_idx = 0; - } - else - while (h_idx != idxd->last_completed_hdl) { - n++; - if (++h_idx == idxd->hdl_ring_sz) - h_idx = 0; - } + if (idxd->cfg.hdls_disable) { + n = (idxd->hdls_avail < idxd->hdls_read) ? 
+ (idxd->hdls_avail + idxd->desc_ring_mask + 1 - idxd->hdls_read) : + (idxd->hdls_avail - idxd->hdls_read); + idxd->hdls_read = idxd->hdls_avail; + goto out; + } - idxd->next_ret_hdl = h_idx; + for (n = 0, h_idx = idxd->hdls_read; + n < max_ops && h_idx != idxd->hdls_avail; n++) { + src_hdls[n] = idxd->hdl_ring[h_idx].src; + dst_hdls[n] = idxd->hdl_ring[h_idx].dst; + if (++h_idx > idxd->desc_ring_mask) + h_idx = 0; + } + idxd->hdls_read = h_idx; +out: idxd->xstats.completed += n; return n; } From patchwork Thu Mar 18 18:20:41 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Bruce Richardson X-Patchwork-Id: 89506 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id AFB58A0561; Thu, 18 Mar 2021 19:21:43 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id E8989140EF7; Thu, 18 Mar 2021 19:21:36 +0100 (CET) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id B23FF140EE6 for ; Thu, 18 Mar 2021 19:21:34 +0100 (CET) IronPort-SDR: XiPVc/DQYpR/1xahfFnWVbtJ5BCAhQytYLbYW8eUx8b1yFFEwe9tDGlGOxtWsgK3cQ2jtTJtnO 3bwS3qs5ZhNQ== X-IronPort-AV: E=McAfee;i="6000,8403,9927"; a="169668106" X-IronPort-AV: E=Sophos;i="5.81,259,1610438400"; d="scan'208";a="169668106" Received: from orsmga007.jf.intel.com ([10.7.209.58]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 18 Mar 2021 11:21:33 -0700 IronPort-SDR: aGBe+eRqcDSrR48mfBLrss9M0alMt0XJazvmehUoHzDku/wbnHhXpEg/HOrBCjMIb/jALgCWdt 7QYzzvfoRv/g== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.81,259,1610438400"; d="scan'208";a="411998935" Received: from silpixa00399126.ir.intel.com ([10.237.223.184]) by orsmga007.jf.intel.com with ESMTP; 18 Mar 2021 11:21:31 -0700 From: Bruce Richardson To: dev@dpdk.org Cc: Kevin Laatz , Bruce Richardson Date: Thu, 18 Mar 2021 18:20:41 +0000 Message-Id: <20210318182042.43658-6-bruce.richardson@intel.com> X-Mailer: git-send-email 2.27.0 In-Reply-To: <20210318182042.43658-1-bruce.richardson@intel.com> References: <20210318182042.43658-1-bruce.richardson@intel.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v1 5/6] raw/ioat: add api to query remaining ring space X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Kevin Laatz Add a new API to query remaining descriptor ring capacity. This API is useful, for example, when an application needs to enqueue a fragmented packet and wants to ensure that all segments of the packet will be enqueued together. 
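For illustration, this is the kind of caller-side pattern the API enables. A hedged sketch only, with error handling reduced to return codes: the helper name and destination layout are invented for the example, while rte_ioat_burst_capacity(), rte_ioat_enqueue_copy() and rte_ioat_perform_ops() are the functions from this series.

#include <errno.h>
#include <rte_mbuf.h>
#include <rte_ioat_rawdev.h>

/* Enqueue every segment of a multi-segment packet, or nothing at all, so
 * that one packet's copies are never split across two bursts. */
static int
enqueue_all_segs_or_none(int dev_id, struct rte_mbuf *pkt, rte_iova_t dst_base)
{
	struct rte_mbuf *seg;
	rte_iova_t dst = dst_base;

	/* refuse to start a packet we cannot finish in this burst */
	if (rte_ioat_burst_capacity(dev_id) < pkt->nb_segs)
		return -ENOSPC;	/* caller can rte_ioat_perform_ops() and retry */

	for (seg = pkt; seg != NULL; seg = seg->next) {
		if (rte_ioat_enqueue_copy(dev_id,
				rte_pktmbuf_iova(seg), dst, seg->data_len,
				(uintptr_t)seg, 0) != 1)
			return -EIO;	/* unexpected: capacity was checked above */
		dst += seg->data_len;
	}
	return 0;
}

Without the capacity check, an enqueue failure part-way through a packet would leave the application having to roll back or otherwise account for the partially-enqueued segments.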
Signed-off-by: Kevin Laatz Tested-by: Sunil Pai G --- drivers/raw/ioat/ioat_rawdev_test.c | 136 +++++++++++++++++++++++++ drivers/raw/ioat/rte_ioat_rawdev_fns.h | 46 +++++++++ 2 files changed, 182 insertions(+) diff --git a/drivers/raw/ioat/ioat_rawdev_test.c b/drivers/raw/ioat/ioat_rawdev_test.c index 3de8273704..3a4c7a5161 100644 --- a/drivers/raw/ioat/ioat_rawdev_test.c +++ b/drivers/raw/ioat/ioat_rawdev_test.c @@ -202,6 +202,138 @@ test_enqueue_fill(int dev_id) return 0; } +static int +test_burst_capacity(int dev_id, unsigned int ring_size) +{ + unsigned int i, j; + unsigned int length = 1024; + + /* Test to make sure it does not enqueue if we cannot fit the entire burst */ + do { +#define BURST_SIZE 19 +#define EXPECTED_REJECTS 5 + struct rte_mbuf *srcs[BURST_SIZE], *dsts[BURST_SIZE]; + struct rte_mbuf *completed_src[BURST_SIZE]; + struct rte_mbuf *completed_dst[BURST_SIZE]; + unsigned int cnt_success = 0; + unsigned int cnt_rejected = 0; + unsigned int valid_iters = (ring_size - 1)/BURST_SIZE; + + /* Enqueue burst until they won't fit + some extra iterations which should + * be rejected + */ + for (i = 0; i < valid_iters + EXPECTED_REJECTS; i++) { + if (rte_ioat_burst_capacity(dev_id) >= BURST_SIZE) { + for (j = 0; j < BURST_SIZE; j++) { + + srcs[j] = rte_pktmbuf_alloc(pool); + dsts[j] = rte_pktmbuf_alloc(pool); + srcs[j]->data_len = srcs[j]->pkt_len = length; + dsts[j]->data_len = dsts[j]->pkt_len = length; + + if (rte_ioat_enqueue_copy(dev_id, + srcs[j]->buf_iova + srcs[j]->data_off, + dsts[j]->buf_iova + dsts[j]->data_off, + length, + (uintptr_t)srcs[j], + (uintptr_t)dsts[j]) != 1) { + PRINT_ERR("Error with rte_ioat_enqueue_copy\n"); + return -1; + } + + rte_pktmbuf_free(srcs[j]); + rte_pktmbuf_free(dsts[j]); + cnt_success++; + } + } else { + cnt_rejected++; + } + } + + /* do cleanup before next tests */ + rte_ioat_perform_ops(dev_id); + usleep(100); + for (i = 0; i < valid_iters; i++) { + if (rte_ioat_completed_ops(dev_id, BURST_SIZE, (void *)completed_src, + (void *)completed_dst) != BURST_SIZE) { + PRINT_ERR("error with completions\n"); + return -1; + } + } + + printf("successful_enqueues: %u expected_successful: %u rejected_iters: %u expected_rejects: %u\n", + cnt_success, valid_iters * BURST_SIZE, cnt_rejected, + EXPECTED_REJECTS); + + if (!(cnt_success == (valid_iters * BURST_SIZE)) && + !(cnt_rejected == EXPECTED_REJECTS)) { + PRINT_ERR("Burst Capacity test failed\n"); + return -1; + } + } while (0); + + /* Verify that space is taken and free'd as expected. + * Repeat the test to verify wrap-around handling is correct in + * rte_ioat_burst_capacity(). 
+ */ + for (i = 0; i < ring_size / 32; i++) { + struct rte_mbuf *srcs[64], *dsts[64]; + struct rte_mbuf *completed_src[64]; + struct rte_mbuf *completed_dst[64]; + + /* Make sure the ring is clean before we start */ + if (rte_ioat_burst_capacity(dev_id) != ring_size - 1) { + PRINT_ERR("Error, ring should be empty\n"); + return -1; + } + + /* Enqueue 64 mbufs & verify that space is taken */ + for (j = 0; j < 64; j++) { + srcs[j] = rte_pktmbuf_alloc(pool); + dsts[j] = rte_pktmbuf_alloc(pool); + srcs[j]->data_len = srcs[j]->pkt_len = length; + dsts[j]->data_len = dsts[j]->pkt_len = length; + + if (rte_ioat_enqueue_copy(dev_id, + srcs[j]->buf_iova + srcs[j]->data_off, + dsts[j]->buf_iova + dsts[j]->data_off, + length, + (uintptr_t)srcs[j], + (uintptr_t)dsts[j]) != 1) { + PRINT_ERR("Error with rte_ioat_enqueue_copy\n"); + return -1; + } + + rte_pktmbuf_free(srcs[j]); + rte_pktmbuf_free(dsts[j]); + } + + if (rte_ioat_burst_capacity(dev_id) != (ring_size - 1) - 64) { + PRINT_ERR("Error, space available not as expected\n"); + return -1; + } + + /* Copy, gather completions, and make sure the space is free'd again */ + rte_ioat_perform_ops(dev_id); + usleep(100); + for (j = 0; j < 2; j++) { + if (rte_ioat_completed_ops(dev_id, 32, (void *)completed_src, + (void *)completed_dst) != 32) { + PRINT_ERR("error with completions\n"); + return -1; + } + } + + if (rte_ioat_burst_capacity(dev_id) != ring_size - 1) { + PRINT_ERR("Error, space available not as expected\n"); + return -1; + } + + } + + return 0; +} + int ioat_rawdev_test(uint16_t dev_id) { @@ -310,6 +442,10 @@ ioat_rawdev_test(uint16_t dev_id) } printf("\n"); + printf("Running Burst Capacity Test\n"); + if (test_burst_capacity(dev_id, expected_ring_size[dev_id]) != 0) + goto err; + rte_rawdev_stop(dev_id); if (rte_rawdev_xstats_reset(dev_id, NULL, 0) != 0) { PRINT_ERR("Error resetting xstat values\n"); diff --git a/drivers/raw/ioat/rte_ioat_rawdev_fns.h b/drivers/raw/ioat/rte_ioat_rawdev_fns.h index 231aa72612..ba40f29c25 100644 --- a/drivers/raw/ioat/rte_ioat_rawdev_fns.h +++ b/drivers/raw/ioat/rte_ioat_rawdev_fns.h @@ -199,6 +199,19 @@ struct rte_idxd_rawdev { struct rte_idxd_user_hdl *hdl_ring; }; +static __rte_always_inline uint16_t +__ioat_burst_capacity(int dev_id) +{ + struct rte_ioat_rawdev *ioat = + (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private; + unsigned short size = ioat->ring_size - 1; + unsigned short read = ioat->next_read; + unsigned short write = ioat->next_write; + unsigned short space = size - (write - read); + + return space; +} + static __rte_always_inline int __ioat_write_desc(int dev_id, uint32_t op, uint64_t src, phys_addr_t dst, unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl) @@ -357,6 +370,28 @@ __ioat_completed_ops(int dev_id, uint8_t max_copies, return count; } +static __rte_always_inline uint16_t +__idxd_burst_capacity(int dev_id) +{ + struct rte_idxd_rawdev *idxd = + (struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private; + uint16_t write_idx = idxd->batch_start + idxd->batch_size; + uint16_t used_space; + + /* Check for space in the batch ring */ + if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) || + idxd->batch_idx_write + 1 == idxd->batch_idx_read) + return 0; + + /* for descriptors, check for wrap-around on write but not read */ + if (idxd->hdls_read > write_idx) + write_idx += idxd->desc_ring_mask + 1; + used_space = write_idx - idxd->hdls_read; + + /* Return amount of free space in the descriptor ring */ + return idxd->desc_ring_mask - used_space; +} + 
static __rte_always_inline int __idxd_write_desc(int dev_id, const uint32_t op_flags, @@ -538,6 +573,17 @@ __idxd_completed_ops(int dev_id, uint8_t max_ops, return n; } +static inline uint16_t +rte_ioat_burst_capacity(int dev_id) +{ + enum rte_ioat_dev_type *type = + (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private; + if (*type == RTE_IDXD_DEV) + return __idxd_burst_capacity(dev_id); + else + return __ioat_burst_capacity(dev_id); +} + static inline int rte_ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst, unsigned int len, uintptr_t dst_hdl) From patchwork Thu Mar 18 18:20:42 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Bruce Richardson X-Patchwork-Id: 89507 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D2F03A0561; Thu, 18 Mar 2021 19:21:49 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 1AD3C140F02; Thu, 18 Mar 2021 19:21:43 +0100 (CET) Received: from mga02.intel.com (mga02.intel.com [134.134.136.20]) by mails.dpdk.org (Postfix) with ESMTP id 99A78140F00 for ; Thu, 18 Mar 2021 19:21:41 +0100 (CET) IronPort-SDR: JJYiH/G6FrAe5CDnAh1t7yUvOgNPiLoUDOB9/5sy+ljrDxcT2+MXKrRHdjhD1BjeRLDyoQ/WA4 g06HxF9rGSJQ== X-IronPort-AV: E=McAfee;i="6000,8403,9927"; a="176877161" X-IronPort-AV: E=Sophos;i="5.81,259,1610438400"; d="scan'208";a="176877161" Received: from orsmga007.jf.intel.com ([10.7.209.58]) by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 18 Mar 2021 11:21:40 -0700 IronPort-SDR: DY4uMlZIm8YwGsjNx9cqabdi0AwmL7wsnnWphVSaB7da8kPZoFjT3IfF9GkqHK6TXxUvmJZIvR 5aIn3WtznEQA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.81,259,1610438400"; d="scan'208";a="411998979" Received: from silpixa00399126.ir.intel.com ([10.237.223.184]) by orsmga007.jf.intel.com with ESMTP; 18 Mar 2021 11:21:37 -0700 From: Bruce Richardson To: dev@dpdk.org Cc: Bruce Richardson Date: Thu, 18 Mar 2021 18:20:42 +0000 Message-Id: <20210318182042.43658-7-bruce.richardson@intel.com> X-Mailer: git-send-email 2.27.0 In-Reply-To: <20210318182042.43658-1-bruce.richardson@intel.com> References: <20210318182042.43658-1-bruce.richardson@intel.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v1 6/6] raw/ioat: add bus driver for device scanning automatically X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Rather than using a vdev with args, DPDK can scan and initialize the devices automatically using a bus-type driver. This bus does not need to worry about registering device drivers, rather it can initialize the devices directly on probe. The device instances (queues) to use are detected from /dev with the additional info about them got from /sys. 
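As a rough picture of what the scan step amounts to outside of DPDK: the bus lists /dev/dsa, keeps entries whose names parse as "wq<instance>.<queue>", and at probe time only takes over queues whose sysfs "name" attribute marks them for DPDK (a "dpdk_" prefix, or the process's EAL file prefix). A minimal standalone sketch of that directory walk, printing what it finds instead of registering devices; paths and the naming convention mirror the patch, error handling is trimmed:

#include <dirent.h>
#include <stdio.h>

int
main(void)
{
	DIR *d = opendir("/dev/dsa");
	struct dirent *wq;
	unsigned int dev_id, wq_id;

	if (d == NULL)
		return 0;	/* no DSA devices exposed, nothing to scan */

	while ((wq = readdir(d)) != NULL) {
		/* keep only entries of the form wq<device>.<queue> */
		if (sscanf(wq->d_name, "wq%u.%u", &dev_id, &wq_id) != 2)
			continue;
		printf("found DSA instance %u, queue %u (%s)\n",
				dev_id, wq_id, wq->d_name);
	}
	closedir(d);
	return 0;
}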
Signed-off-by: Bruce Richardson --- drivers/raw/ioat/idxd_bus.c | 320 +++++++++++++++++++++++++++++++++++ drivers/raw/ioat/idxd_vdev.c | 232 ------------------------- drivers/raw/ioat/meson.build | 3 +- 3 files changed, 321 insertions(+), 234 deletions(-) create mode 100644 drivers/raw/ioat/idxd_bus.c delete mode 100644 drivers/raw/ioat/idxd_vdev.c -- 2.27.0 diff --git a/drivers/raw/ioat/idxd_bus.c b/drivers/raw/ioat/idxd_bus.c new file mode 100644 index 0000000000..ec15d9736a --- /dev/null +++ b/drivers/raw/ioat/idxd_bus.c @@ -0,0 +1,320 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include "ioat_private.h" + +/* default value for DSA paths, but allow override in environment for testing */ +#define DSA_DEV_PATH "/dev/dsa" +#define DSA_SYSFS_PATH "/sys/bus/dsa/devices" + +/** a DSA device instance */ +struct rte_dsa_device { + TAILQ_ENTRY(rte_dsa_device) next; /**< next dev in list */ + struct rte_device device; /**< Inherit core device */ + char wq_name[32]; /**< the workqueue name/number e.g. wq0.1 */ + uint16_t device_id; /**< the DSA instance number */ + uint16_t wq_id; /**< the queue on the DSA instance */ +}; + +/* forward prototypes */ +struct dsa_bus; +static int dsa_scan(void); +static int dsa_probe(void); +static struct rte_device *dsa_find_device(const struct rte_device *start, + rte_dev_cmp_t cmp, const void *data); + +/** List of devices */ +TAILQ_HEAD(dsa_device_list, rte_dsa_device); + +/** + * Structure describing the DSA bus + */ +struct dsa_bus { + struct rte_bus bus; /**< Inherit the generic class */ + struct rte_driver driver; /**< Driver struct for devices to point to */ + struct dsa_device_list device_list; /**< List of PCI devices */ +}; + +struct dsa_bus dsa_bus = { + .bus = { + .scan = dsa_scan, + .probe = dsa_probe, + .find_device = dsa_find_device, + }, + .driver = { + .name = "rawdev_idxd" + }, + .device_list = TAILQ_HEAD_INITIALIZER(dsa_bus.device_list), +}; + +static inline const char * +dsa_get_dev_path(void) +{ + const char *path = getenv("DSA_DEV_PATH"); + return path ? path : DSA_DEV_PATH; +} + +static inline const char * +dsa_get_sysfs_path(void) +{ + const char *path = getenv("DSA_SYSFS_PATH"); + return path ? 
path : DSA_SYSFS_PATH; +} + +static const struct rte_rawdev_ops idxd_vdev_ops = { + .dev_close = idxd_rawdev_close, + .dev_selftest = ioat_rawdev_test, + .dump = idxd_dev_dump, + .dev_configure = idxd_dev_configure, + .dev_info_get = idxd_dev_info_get, + .xstats_get = ioat_xstats_get, + .xstats_get_names = ioat_xstats_get_names, + .xstats_reset = ioat_xstats_reset, +}; + +static void * +idxd_vdev_mmap_wq(struct rte_dsa_device *dev) +{ + void *addr; + char path[PATH_MAX]; + int fd; + + snprintf(path, sizeof(path), "%s/%s", dsa_get_dev_path(), dev->wq_name); + fd = open(path, O_RDWR); + if (fd < 0) { + IOAT_PMD_ERR("Failed to open device path: %s", path); + return NULL; + } + + addr = mmap(NULL, 0x1000, PROT_WRITE, MAP_SHARED, fd, 0); + close(fd); + if (addr == MAP_FAILED) { + IOAT_PMD_ERR("Failed to mmap device %s", path); + return NULL; + } + + return addr; +} + +static int +read_wq_string(struct rte_dsa_device *dev, const char *filename, + char *value, size_t valuelen) +{ + char sysfs_node[PATH_MAX]; + int len; + int fd; + + snprintf(sysfs_node, sizeof(sysfs_node), "%s/%s/%s", + dsa_get_sysfs_path(), dev->wq_name, filename); + if ((fd = open(sysfs_node, O_RDONLY)) < 0) { + IOAT_PMD_ERR("%s(): opening file '%s' failed: %s", + __func__, sysfs_node, strerror(errno)); + return -1; + } + + len = read(fd, value, valuelen - 1); + close(fd); + if (len < 0) { + IOAT_PMD_ERR("%s(): error reading file '%s': %s", + __func__, sysfs_node, strerror(errno)); + return -1; + } + value[len] = '\0'; + return 0; +} + +static int +read_wq_int(struct rte_dsa_device *dev, const char *filename, + int *value) +{ + char sysfs_node[PATH_MAX]; + FILE *f; + int ret = 0; + + snprintf(sysfs_node, sizeof(sysfs_node), "%s/%s/%s", + dsa_get_sysfs_path(), dev->wq_name, filename); + f = fopen(sysfs_node, "r"); + if (f == NULL) { + IOAT_PMD_ERR("%s(): opening file '%s' failed: %s", + __func__, sysfs_node, strerror(errno)); + return -1; + } + + if (fscanf(f, "%d", value) != 1) { + IOAT_PMD_ERR("%s(): error reading file '%s': %s", + __func__, sysfs_node, strerror(errno)); + ret = -1; + } + + fclose(f); + return ret; +} + +static int +read_device_int(struct rte_dsa_device *dev, const char *filename, + int *value) +{ + char sysfs_node[PATH_MAX]; + FILE *f; + int ret; + + snprintf(sysfs_node, sizeof(sysfs_node), "%s/dsa%d/%s", + dsa_get_sysfs_path(), dev->device_id, filename); + f = fopen(sysfs_node, "r"); + if (f == NULL) { + IOAT_PMD_ERR("%s(): opening file '%s' failed: %s", + __func__, sysfs_node, strerror(errno)); + return -1; + } + + if (fscanf(f, "%d", value) != 1) { + IOAT_PMD_ERR("%s(): error reading file '%s': %s", + __func__, sysfs_node, strerror(errno)); + ret = -1; + } + + fclose(f); + return ret; +} + +static int +idxd_rawdev_probe_dsa(struct rte_dsa_device *dev) +{ + struct idxd_rawdev idxd = {{0}}; /* double {} to avoid error on BSD12 */ + int ret = 0; + + IOAT_PMD_INFO("Probing device %s on numa node %d", + dev->wq_name, dev->device.numa_node); + if (read_wq_int(dev, "size", &ret) < 0) + return -1; + idxd.max_batches = ret; + idxd.qid = dev->wq_id; + idxd.u.vdev.dsa_id = dev->device_id; + + idxd.public.portal = idxd_vdev_mmap_wq(dev); + if (idxd.public.portal == NULL) { + IOAT_PMD_ERR("WQ mmap failed"); + return -ENOENT; + } + + ret = idxd_rawdev_create(dev->wq_name, &dev->device, &idxd, &idxd_vdev_ops); + if (ret) { + IOAT_PMD_ERR("Failed to create rawdev %s", dev->wq_name); + return ret; + } + + return 0; +} + +static int +is_for_this_process_use(const char *name) +{ + char *runtime_dir = 
strdup(rte_eal_get_runtime_dir()); + char *prefix = basename(runtime_dir); + int prefixlen = strlen(prefix); + int retval = 0; + + if (strncmp(name, "dpdk_", 5) == 0) + retval = 1; + if (strncmp(name, prefix, prefixlen) == 0 && name[prefixlen] == '_') + retval = 1; + + free(runtime_dir); + return retval; +} + +static int +dsa_probe(void) +{ + struct rte_dsa_device *dev; + + TAILQ_FOREACH(dev, &dsa_bus.device_list, next) { + char type[64], name[64]; + + if (read_wq_string(dev, "type", type, sizeof(type)) < 0 || + read_wq_string(dev, "name", name, sizeof(name)) < 0) + continue; + + if (strncmp(type, "user", 4) == 0 && is_for_this_process_use(name)) { + dev->device.driver = &dsa_bus.driver; + idxd_rawdev_probe_dsa(dev); + continue; + } + IOAT_PMD_DEBUG("WQ '%s', not allocated to DPDK", dev->wq_name); + } + + return 0; +} + +static int +dsa_scan(void) +{ + const char *path = dsa_get_dev_path(); + struct dirent *wq; + DIR *dev_dir; + + dev_dir = opendir(path); + if (dev_dir == NULL) { + if (errno == ENOENT) + return 0; /* no bus, return witout error */ + IOAT_PMD_ERR("%s(): opendir '%s' failed: %s", + __func__, path, strerror(errno)); + return -1; + } + + while ((wq = readdir(dev_dir)) != NULL) { + struct rte_dsa_device *dev; + unsigned int device_id, wq_id; + int numa_node = -1; + + if (strncmp(wq->d_name, "wq", 2) != 0) + continue; + if (strnlen(wq->d_name, sizeof(dev->wq_name)) == sizeof(dev->wq_name)) { + IOAT_PMD_ERR("%s(): wq name too long: '%s', skipping", + __func__, wq->d_name); + continue; + } + IOAT_PMD_DEBUG("%s(): found %s/%s", __func__, path, wq->d_name); + + if (sscanf(wq->d_name, "wq%u.%u", &device_id, &wq_id) != 2) { + IOAT_PMD_ERR("Error parsing WQ name: %s", wq->d_name); + continue; + } + + dev = malloc(sizeof(*dev)); + dev->device_id = device_id; + dev->wq_id = wq_id; + dev->device.bus = &dsa_bus.bus; + strlcpy(dev->wq_name, wq->d_name, sizeof(dev->wq_name)); + TAILQ_INSERT_TAIL(&dsa_bus.device_list, dev, next); + + read_device_int(dev, "numa_node", &numa_node); + dev->device.numa_node = numa_node; + } + + return 0; +} + +static struct rte_device * +dsa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp, + const void *data) +{ + RTE_SET_USED(start); + RTE_SET_USED(cmp); + RTE_SET_USED(data); + return NULL; +} + +RTE_REGISTER_BUS(dsa, dsa_bus.bus); diff --git a/drivers/raw/ioat/idxd_vdev.c b/drivers/raw/ioat/idxd_vdev.c deleted file mode 100644 index af585053b4..0000000000 --- a/drivers/raw/ioat/idxd_vdev.c +++ /dev/null @@ -1,232 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2020 Intel Corporation - */ - -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "ioat_private.h" - -/** Name of the device driver */ -#define IDXD_PMD_RAWDEV_NAME rawdev_idxd -/* takes a work queue(WQ) as parameter */ -#define IDXD_ARG_WQ "wq" - -static const char * const valid_args[] = { - IDXD_ARG_WQ, - NULL -}; - -struct idxd_vdev_args { - uint8_t device_id; - uint8_t wq_id; -}; - -static const struct rte_rawdev_ops idxd_vdev_ops = { - .dev_close = idxd_rawdev_close, - .dev_selftest = ioat_rawdev_test, - .dump = idxd_dev_dump, - .dev_configure = idxd_dev_configure, - .dev_info_get = idxd_dev_info_get, - .xstats_get = ioat_xstats_get, - .xstats_get_names = ioat_xstats_get_names, - .xstats_reset = ioat_xstats_reset, -}; - -static void * -idxd_vdev_mmap_wq(struct idxd_vdev_args *args) -{ - void *addr; - char path[PATH_MAX]; - int fd; - - snprintf(path, sizeof(path), "/dev/dsa/wq%u.%u", - args->device_id, args->wq_id); - 
fd = open(path, O_RDWR); - if (fd < 0) { - IOAT_PMD_ERR("Failed to open device path"); - return NULL; - } - - addr = mmap(NULL, 0x1000, PROT_WRITE, MAP_SHARED, fd, 0); - close(fd); - if (addr == MAP_FAILED) { - IOAT_PMD_ERR("Failed to mmap device"); - return NULL; - } - - return addr; -} - -static int -idxd_rawdev_parse_wq(const char *key __rte_unused, const char *value, - void *extra_args) -{ - struct idxd_vdev_args *args = (struct idxd_vdev_args *)extra_args; - int dev, wq, bytes = -1; - int read = sscanf(value, "%d.%d%n", &dev, &wq, &bytes); - - if (read != 2 || bytes != (int)strlen(value)) { - IOAT_PMD_ERR("Error parsing work-queue id. Must be in . format"); - return -EINVAL; - } - - if (dev >= UINT8_MAX || wq >= UINT8_MAX) { - IOAT_PMD_ERR("Device or work queue id out of range"); - return -EINVAL; - } - - args->device_id = dev; - args->wq_id = wq; - - return 0; -} - -static int -idxd_vdev_parse_params(struct rte_kvargs *kvlist, struct idxd_vdev_args *args) -{ - int ret = 0; - - if (rte_kvargs_count(kvlist, IDXD_ARG_WQ) == 1) { - if (rte_kvargs_process(kvlist, IDXD_ARG_WQ, - &idxd_rawdev_parse_wq, args) < 0) { - IOAT_PMD_ERR("Error parsing %s", IDXD_ARG_WQ); - ret = -EINVAL; - } - } else { - IOAT_PMD_ERR("%s is a mandatory arg", IDXD_ARG_WQ); - ret = -EINVAL; - } - - rte_kvargs_free(kvlist); - return ret; -} - -static int -idxd_vdev_get_max_batches(struct idxd_vdev_args *args) -{ - char sysfs_path[PATH_MAX]; - FILE *f; - int ret; - - snprintf(sysfs_path, sizeof(sysfs_path), - "/sys/bus/dsa/devices/wq%u.%u/size", - args->device_id, args->wq_id); - f = fopen(sysfs_path, "r"); - if (f == NULL) - return -1; - - if (fscanf(f, "%d", &ret) != 1) - ret = -1; - - fclose(f); - return ret; -} - -static int -idxd_rawdev_probe_vdev(struct rte_vdev_device *vdev) -{ - struct rte_kvargs *kvlist; - struct idxd_rawdev idxd = {{0}}; /* double {} to avoid error on BSD12 */ - struct idxd_vdev_args vdev_args; - const char *name; - int ret = 0; - - name = rte_vdev_device_name(vdev); - if (name == NULL) - return -EINVAL; - - IOAT_PMD_INFO("Initializing pmd_idxd for %s", name); - - kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_args); - if (kvlist == NULL) { - IOAT_PMD_ERR("Invalid kvargs key"); - return -EINVAL; - } - - ret = idxd_vdev_parse_params(kvlist, &vdev_args); - if (ret) { - IOAT_PMD_ERR("Failed to parse kvargs"); - return -EINVAL; - } - - idxd.qid = vdev_args.wq_id; - idxd.u.vdev.dsa_id = vdev_args.device_id; - idxd.max_batches = idxd_vdev_get_max_batches(&vdev_args); - - idxd.public.portal = idxd_vdev_mmap_wq(&vdev_args); - if (idxd.public.portal == NULL) { - IOAT_PMD_ERR("WQ mmap failed"); - return -ENOENT; - } - - ret = idxd_rawdev_create(name, &vdev->device, &idxd, &idxd_vdev_ops); - if (ret) { - IOAT_PMD_ERR("Failed to create rawdev %s", name); - return ret; - } - - return 0; -} - -static int -idxd_rawdev_remove_vdev(struct rte_vdev_device *vdev) -{ - struct idxd_rawdev *idxd; - const char *name; - struct rte_rawdev *rdev; - int ret = 0; - - name = rte_vdev_device_name(vdev); - if (name == NULL) - return -EINVAL; - - IOAT_PMD_INFO("Remove DSA vdev %p", name); - - rdev = rte_rawdev_pmd_get_named_dev(name); - if (!rdev) { - IOAT_PMD_ERR("Invalid device name (%s)", name); - return -EINVAL; - } - - idxd = rdev->dev_private; - - /* free context and memory */ - if (rdev->dev_private != NULL) { - IOAT_PMD_DEBUG("Freeing device driver memory"); - rdev->dev_private = NULL; - - if (munmap(idxd->public.portal, 0x1000) < 0) { - IOAT_PMD_ERR("Error unmapping portal"); - ret = -errno; - } - - 
rte_free(idxd->public.batch_idx_ring); - rte_free(idxd->public.desc_ring); - rte_free(idxd->public.hdl_ring); - - rte_memzone_free(idxd->mz); - } - - if (rte_rawdev_pmd_release(rdev)) - IOAT_PMD_ERR("Device cleanup failed"); - - return ret; -} - -struct rte_vdev_driver idxd_rawdev_drv_vdev = { - .probe = idxd_rawdev_probe_vdev, - .remove = idxd_rawdev_remove_vdev, -}; - -RTE_PMD_REGISTER_VDEV(IDXD_PMD_RAWDEV_NAME, idxd_rawdev_drv_vdev); -RTE_PMD_REGISTER_PARAM_STRING(IDXD_PMD_RAWDEV_NAME, - "wq="); diff --git a/drivers/raw/ioat/meson.build b/drivers/raw/ioat/meson.build index 6fbae05b78..b48e2b2c26 100644 --- a/drivers/raw/ioat/meson.build +++ b/drivers/raw/ioat/meson.build @@ -4,13 +4,12 @@ build = dpdk_conf.has('RTE_ARCH_X86') reason = 'only supported on x86' sources = files( + 'idxd_bus.c', 'idxd_pci.c', - 'idxd_vdev.c', 'ioat_common.c', 'ioat_rawdev.c', 'ioat_rawdev_test.c') deps += ['bus_pci', - 'bus_vdev', 'mbuf', 'rawdev']