get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (the supplied representation replaces the existing one).

GET /api/patches/98321/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 98321,
    "url": "https://patches.dpdk.org/api/patches/98321/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210908103016.1661914-7-kevin.laatz@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210908103016.1661914-7-kevin.laatz@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210908103016.1661914-7-kevin.laatz@intel.com",
    "date": "2021-09-08T10:30:05",
    "name": "[v3,06/17] dma/idxd: create dmadev instances on pci probe",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "7d3039e3fefd878ef962df32df1bf3b30d1c521a",
    "submitter": {
        "id": 921,
        "url": "https://patches.dpdk.org/api/people/921/?format=api",
        "name": "Kevin Laatz",
        "email": "kevin.laatz@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210908103016.1661914-7-kevin.laatz@intel.com/mbox/",
    "series": [
        {
            "id": 18762,
            "url": "https://patches.dpdk.org/api/series/18762/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=18762",
            "date": "2021-09-08T10:29:59",
            "name": "add dmadev driver for idxd devices",
            "version": 3,
            "mbox": "https://patches.dpdk.org/series/18762/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/98321/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/98321/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id D4CD9A0C56;\n\tWed,  8 Sep 2021 12:31:01 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id D601D411B5;\n\tWed,  8 Sep 2021 12:30:35 +0200 (CEST)",
            "from mga06.intel.com (mga06.intel.com [134.134.136.31])\n by mails.dpdk.org (Postfix) with ESMTP id 4289D411AF\n for <dev@dpdk.org>; Wed,  8 Sep 2021 12:30:33 +0200 (CEST)",
            "from orsmga001.jf.intel.com ([10.7.209.18])\n by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 08 Sep 2021 03:30:32 -0700",
            "from silpixa00401122.ir.intel.com ([10.55.128.10])\n by orsmga001.jf.intel.com with ESMTP; 08 Sep 2021 03:30:31 -0700"
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10100\"; a=\"281461943\"",
            "E=Sophos;i=\"5.85,277,1624345200\"; d=\"scan'208\";a=\"281461943\"",
            "E=Sophos;i=\"5.85,277,1624345200\"; d=\"scan'208\";a=\"513213847\""
        ],
        "X-ExtLoop1": "1",
        "From": "Kevin Laatz <kevin.laatz@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "bruce.richardson@intel.com, fengchengwen@huawei.com, jerinj@marvell.com,\n conor.walsh@intel.com, Kevin Laatz <kevin.laatz@intel.com>",
        "Date": "Wed,  8 Sep 2021 10:30:05 +0000",
        "Message-Id": "<20210908103016.1661914-7-kevin.laatz@intel.com>",
        "X-Mailer": "git-send-email 2.30.2",
        "In-Reply-To": "<20210908103016.1661914-1-kevin.laatz@intel.com>",
        "References": "<20210903105001.1179328-1-kevin.laatz@intel.com>\n <20210908103016.1661914-1-kevin.laatz@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v3 06/17] dma/idxd: create dmadev instances on\n pci probe",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "When a suitable device is found during the PCI probe, create a dmadev\ninstance for each HW queue. HW definitions required are also included.\n\nSigned-off-by: Bruce Richardson <bruce.richardson@intel.com>\nSigned-off-by: Kevin Laatz <kevin.laatz@intel.com>\n---\n drivers/dma/idxd/idxd_hw_defs.h  |  71 ++++++++\n drivers/dma/idxd/idxd_internal.h |  16 ++\n drivers/dma/idxd/idxd_pci.c      | 272 ++++++++++++++++++++++++++++++-\n 3 files changed, 356 insertions(+), 3 deletions(-)\n create mode 100644 drivers/dma/idxd/idxd_hw_defs.h",
    "diff": "diff --git a/drivers/dma/idxd/idxd_hw_defs.h b/drivers/dma/idxd/idxd_hw_defs.h\nnew file mode 100644\nindex 0000000000..ea627cba6d\n--- /dev/null\n+++ b/drivers/dma/idxd/idxd_hw_defs.h\n@@ -0,0 +1,71 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2021 Intel Corporation\n+ */\n+\n+#ifndef _IDXD_HW_DEFS_H_\n+#define _IDXD_HW_DEFS_H_\n+\n+/*** Definitions for Intel(R) Data Streaming Accelerator  ***/\n+\n+#define IDXD_CMD_SHIFT 20\n+enum rte_idxd_cmds {\n+\tidxd_enable_dev = 1,\n+\tidxd_disable_dev,\n+\tidxd_drain_all,\n+\tidxd_abort_all,\n+\tidxd_reset_device,\n+\tidxd_enable_wq,\n+\tidxd_disable_wq,\n+\tidxd_drain_wq,\n+\tidxd_abort_wq,\n+\tidxd_reset_wq,\n+};\n+\n+/* General bar0 registers */\n+struct rte_idxd_bar0 {\n+\tuint32_t __rte_cache_aligned version;    /* offset 0x00 */\n+\tuint64_t __rte_aligned(0x10) gencap;     /* offset 0x10 */\n+\tuint64_t __rte_aligned(0x10) wqcap;      /* offset 0x20 */\n+\tuint64_t __rte_aligned(0x10) grpcap;     /* offset 0x30 */\n+\tuint64_t __rte_aligned(0x08) engcap;     /* offset 0x38 */\n+\tuint64_t __rte_aligned(0x10) opcap;      /* offset 0x40 */\n+\tuint64_t __rte_aligned(0x20) offsets[2]; /* offset 0x60 */\n+\tuint32_t __rte_aligned(0x20) gencfg;     /* offset 0x80 */\n+\tuint32_t __rte_aligned(0x08) genctrl;    /* offset 0x88 */\n+\tuint32_t __rte_aligned(0x10) gensts;     /* offset 0x90 */\n+\tuint32_t __rte_aligned(0x08) intcause;   /* offset 0x98 */\n+\tuint32_t __rte_aligned(0x10) cmd;        /* offset 0xA0 */\n+\tuint32_t __rte_aligned(0x08) cmdstatus;  /* offset 0xA8 */\n+\tuint64_t __rte_aligned(0x20) swerror[4]; /* offset 0xC0 */\n+};\n+\n+/* workqueue config is provided by array of uint32_t. 
*/\n+enum rte_idxd_wqcfg {\n+\twq_size_idx,       /* size is in first 32-bit value */\n+\twq_threshold_idx,  /* WQ threshold second 32-bits */\n+\twq_mode_idx,       /* WQ mode and other flags */\n+\twq_sizes_idx,      /* WQ transfer and batch sizes */\n+\twq_occ_int_idx,    /* WQ occupancy interrupt handle */\n+\twq_occ_limit_idx,  /* WQ occupancy limit */\n+\twq_state_idx,      /* WQ state and occupancy state */\n+};\n+\n+#define WQ_MODE_SHARED    0\n+#define WQ_MODE_DEDICATED 1\n+#define WQ_PRIORITY_SHIFT 4\n+#define WQ_BATCH_SZ_SHIFT 5\n+#define WQ_STATE_SHIFT 30\n+#define WQ_STATE_MASK 0x3\n+\n+struct rte_idxd_grpcfg {\n+\tuint64_t grpwqcfg[4]  __rte_cache_aligned; /* 64-byte register set */\n+\tuint64_t grpengcfg;  /* offset 32 */\n+\tuint32_t grpflags;   /* offset 40 */\n+};\n+\n+#define GENSTS_DEV_STATE_MASK 0x03\n+#define CMDSTATUS_ACTIVE_SHIFT 31\n+#define CMDSTATUS_ACTIVE_MASK (1 << 31)\n+#define CMDSTATUS_ERR_MASK 0xFF\n+\n+#endif\ndiff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h\nindex 99ab2df925..d92d7b3e6f 100644\n--- a/drivers/dma/idxd/idxd_internal.h\n+++ b/drivers/dma/idxd/idxd_internal.h\n@@ -5,6 +5,10 @@\n #ifndef _IDXD_INTERNAL_H_\n #define _IDXD_INTERNAL_H_\n \n+#include <rte_spinlock.h>\n+\n+#include \"idxd_hw_defs.h\"\n+\n /**\n  * @file idxd_internal.h\n  *\n@@ -24,6 +28,16 @@ extern int idxd_pmd_logtype;\n #define IDXD_PMD_ERR(fmt, args...)    IDXD_PMD_LOG(ERR, fmt, ## args)\n #define IDXD_PMD_WARN(fmt, args...)   
IDXD_PMD_LOG(WARNING, fmt, ## args)\n \n+struct idxd_pci_common {\n+\trte_spinlock_t lk;\n+\n+\tuint8_t wq_cfg_sz;\n+\tvolatile struct rte_idxd_bar0 *regs;\n+\tvolatile uint32_t *wq_regs_base;\n+\tvolatile struct rte_idxd_grpcfg *grp_regs;\n+\tvolatile void *portals;\n+};\n+\n struct idxd_dmadev {\n \t/* counters to track the batches */\n \tunsigned short max_batches;\n@@ -58,6 +72,8 @@ struct idxd_dmadev {\n \t\tstruct {\n \t\t\tunsigned int dsa_id;\n \t\t} bus;\n+\n+\t\tstruct idxd_pci_common *pci;\n \t} u;\n };\n \ndiff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c\nindex 79e4aadcab..318931713c 100644\n--- a/drivers/dma/idxd/idxd_pci.c\n+++ b/drivers/dma/idxd/idxd_pci.c\n@@ -3,6 +3,9 @@\n  */\n \n #include <rte_bus_pci.h>\n+#include <rte_devargs.h>\n+#include <rte_dmadev_pmd.h>\n+#include <rte_malloc.h>\n \n #include \"idxd_internal.h\"\n \n@@ -16,17 +19,280 @@ const struct rte_pci_id pci_id_idxd_map[] = {\n \t{ .vendor_id = 0, /* sentinel */ },\n };\n \n+static inline int\n+idxd_pci_dev_command(struct idxd_dmadev *idxd, enum rte_idxd_cmds command)\n+{\n+\tuint8_t err_code;\n+\tuint16_t qid = idxd->qid;\n+\tint i = 0;\n+\n+\tif (command >= idxd_disable_wq && command <= idxd_reset_wq)\n+\t\tqid = (1 << qid);\n+\trte_spinlock_lock(&idxd->u.pci->lk);\n+\tidxd->u.pci->regs->cmd = (command << IDXD_CMD_SHIFT) | qid;\n+\n+\tdo {\n+\t\trte_pause();\n+\t\terr_code = idxd->u.pci->regs->cmdstatus;\n+\t\tif (++i >= 1000) {\n+\t\t\tIDXD_PMD_ERR(\"Timeout waiting for command response from HW\");\n+\t\t\trte_spinlock_unlock(&idxd->u.pci->lk);\n+\t\t\treturn err_code;\n+\t\t}\n+\t} while (idxd->u.pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK);\n+\trte_spinlock_unlock(&idxd->u.pci->lk);\n+\n+\treturn err_code & CMDSTATUS_ERR_MASK;\n+}\n+\n+static uint32_t *\n+idxd_get_wq_cfg(struct idxd_pci_common *pci, uint8_t wq_idx)\n+{\n+\treturn RTE_PTR_ADD(pci->wq_regs_base,\n+\t\t\t(uintptr_t)wq_idx << (5 + pci->wq_cfg_sz));\n+}\n+\n+static 
int\n+idxd_is_wq_enabled(struct idxd_dmadev *idxd)\n+{\n+\tuint32_t state = idxd_get_wq_cfg(idxd->u.pci, idxd->qid)[wq_state_idx];\n+\treturn ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;\n+}\n+\n+static const struct rte_dmadev_ops idxd_pci_ops = {\n+\n+};\n+\n+/* each portal uses 4 x 4k pages */\n+#define IDXD_PORTAL_SIZE (4096 * 4)\n+\n+static int\n+init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd,\n+\t\tunsigned int max_queues)\n+{\n+\tstruct idxd_pci_common *pci;\n+\tuint8_t nb_groups, nb_engines, nb_wqs;\n+\tuint16_t grp_offset, wq_offset; /* how far into bar0 the regs are */\n+\tuint16_t wq_size, total_wq_size;\n+\tuint8_t lg2_max_batch, lg2_max_copy_size;\n+\tunsigned int i, err_code;\n+\n+\tpci = malloc(sizeof(*pci));\n+\tif (pci == NULL) {\n+\t\tIDXD_PMD_ERR(\"%s: Can't allocate memory\", __func__);\n+\t\tgoto err;\n+\t}\n+\trte_spinlock_init(&pci->lk);\n+\n+\t/* assign the bar registers, and then configure device */\n+\tpci->regs = dev->mem_resource[0].addr;\n+\tgrp_offset = (uint16_t)pci->regs->offsets[0];\n+\tpci->grp_regs = RTE_PTR_ADD(pci->regs, grp_offset * 0x100);\n+\twq_offset = (uint16_t)(pci->regs->offsets[0] >> 16);\n+\tpci->wq_regs_base = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);\n+\tpci->portals = dev->mem_resource[2].addr;\n+\tpci->wq_cfg_sz = (pci->regs->wqcap >> 24) & 0x0F;\n+\n+\t/* sanity check device status */\n+\tif (pci->regs->gensts & GENSTS_DEV_STATE_MASK) {\n+\t\t/* need function-level-reset (FLR) or is enabled */\n+\t\tIDXD_PMD_ERR(\"Device status is not disabled, cannot init\");\n+\t\tgoto err;\n+\t}\n+\tif (pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK) {\n+\t\t/* command in progress */\n+\t\tIDXD_PMD_ERR(\"Device has a command in progress, cannot init\");\n+\t\tgoto err;\n+\t}\n+\n+\t/* read basic info about the hardware for use when configuring */\n+\tnb_groups = (uint8_t)pci->regs->grpcap;\n+\tnb_engines = (uint8_t)pci->regs->engcap;\n+\tnb_wqs = (uint8_t)(pci->regs->wqcap >> 16);\n+\ttotal_wq_size = 
(uint16_t)pci->regs->wqcap;\n+\tlg2_max_copy_size = (uint8_t)(pci->regs->gencap >> 16) & 0x1F;\n+\tlg2_max_batch = (uint8_t)(pci->regs->gencap >> 21) & 0x0F;\n+\n+\tIDXD_PMD_DEBUG(\"nb_groups = %u, nb_engines = %u, nb_wqs = %u\",\n+\t\t\tnb_groups, nb_engines, nb_wqs);\n+\n+\t/* zero out any old config */\n+\tfor (i = 0; i < nb_groups; i++) {\n+\t\tpci->grp_regs[i].grpengcfg = 0;\n+\t\tpci->grp_regs[i].grpwqcfg[0] = 0;\n+\t}\n+\tfor (i = 0; i < nb_wqs; i++)\n+\t\tidxd_get_wq_cfg(pci, i)[0] = 0;\n+\n+\t/* limit queues if necessary */\n+\tif (max_queues != 0 && nb_wqs > max_queues) {\n+\t\tnb_wqs = max_queues;\n+\t\tif (nb_engines > max_queues)\n+\t\t\tnb_engines = max_queues;\n+\t\tif (nb_groups > max_queues)\n+\t\t\tnb_engines = max_queues;\n+\t\tIDXD_PMD_DEBUG(\"Limiting queues to %u\", nb_wqs);\n+\t}\n+\n+\t/* put each engine into a separate group to avoid reordering */\n+\tif (nb_groups > nb_engines)\n+\t\tnb_groups = nb_engines;\n+\tif (nb_groups < nb_engines)\n+\t\tnb_engines = nb_groups;\n+\n+\t/* assign engines to groups, round-robin style */\n+\tfor (i = 0; i < nb_engines; i++) {\n+\t\tIDXD_PMD_DEBUG(\"Assigning engine %u to group %u\",\n+\t\t\t\ti, i % nb_groups);\n+\t\tpci->grp_regs[i % nb_groups].grpengcfg |= (1ULL << i);\n+\t}\n+\n+\t/* now do the same for queues and give work slots to each queue */\n+\twq_size = total_wq_size / nb_wqs;\n+\tIDXD_PMD_DEBUG(\"Work queue size = %u, max batch = 2^%u, max copy = 2^%u\",\n+\t\t\twq_size, lg2_max_batch, lg2_max_copy_size);\n+\tfor (i = 0; i < nb_wqs; i++) {\n+\t\t/* add engine \"i\" to a group */\n+\t\tIDXD_PMD_DEBUG(\"Assigning work queue %u to group %u\",\n+\t\t\t\ti, i % nb_groups);\n+\t\tpci->grp_regs[i % nb_groups].grpwqcfg[0] |= (1ULL << i);\n+\t\t/* now configure it, in terms of size, max batch, mode */\n+\t\tidxd_get_wq_cfg(pci, i)[wq_size_idx] = wq_size;\n+\t\tidxd_get_wq_cfg(pci, i)[wq_mode_idx] = (1 << WQ_PRIORITY_SHIFT) |\n+\t\t\t\tWQ_MODE_DEDICATED;\n+\t\tidxd_get_wq_cfg(pci, i)[wq_sizes_idx] = 
lg2_max_copy_size |\n+\t\t\t\t(lg2_max_batch << WQ_BATCH_SZ_SHIFT);\n+\t}\n+\n+\t/* dump the group configuration to output */\n+\tfor (i = 0; i < nb_groups; i++) {\n+\t\tIDXD_PMD_DEBUG(\"## Group %d\", i);\n+\t\tIDXD_PMD_DEBUG(\"    GRPWQCFG: %\"PRIx64, pci->grp_regs[i].grpwqcfg[0]);\n+\t\tIDXD_PMD_DEBUG(\"    GRPENGCFG: %\"PRIx64, pci->grp_regs[i].grpengcfg);\n+\t\tIDXD_PMD_DEBUG(\"    GRPFLAGS: %\"PRIx32, pci->grp_regs[i].grpflags);\n+\t}\n+\n+\tidxd->u.pci = pci;\n+\tidxd->max_batches = wq_size;\n+\n+\t/* enable the device itself */\n+\terr_code = idxd_pci_dev_command(idxd, idxd_enable_dev);\n+\tif (err_code) {\n+\t\tIDXD_PMD_ERR(\"Error enabling device: code %#x\", err_code);\n+\t\treturn err_code;\n+\t}\n+\tIDXD_PMD_DEBUG(\"IDXD Device enabled OK\");\n+\n+\treturn nb_wqs;\n+\n+err:\n+\tfree(pci);\n+\treturn -1;\n+}\n+\n static int\n idxd_dmadev_probe_pci(struct rte_pci_driver *drv, struct rte_pci_device *dev)\n {\n-\tint ret = 0;\n+\tstruct idxd_dmadev idxd = {0};\n+\tuint8_t nb_wqs;\n+\tint qid, ret = 0;\n \tchar name[PCI_PRI_STR_SIZE];\n+\tunsigned int max_queues = 0;\n \n \trte_pci_device_name(&dev->addr, name, sizeof(name));\n \tIDXD_PMD_INFO(\"Init %s on NUMA node %d\", name, dev->device.numa_node);\n \tdev->device.driver = &drv->driver;\n \n-\treturn ret;\n+\tif (dev->device.devargs && dev->device.devargs->args[0] != '\\0') {\n+\t\t/* if the number of devargs grows beyond just 1, use rte_kvargs */\n+\t\tif (sscanf(dev->device.devargs->args,\n+\t\t\t\t\"max_queues=%u\", &max_queues) != 1) {\n+\t\t\tIDXD_PMD_ERR(\"Invalid device parameter: '%s'\",\n+\t\t\t\t\tdev->device.devargs->args);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tret = init_pci_device(dev, &idxd, max_queues);\n+\tif (ret < 0) {\n+\t\tIDXD_PMD_ERR(\"Error initializing PCI hardware\");\n+\t\treturn ret;\n+\t}\n+\tif (idxd.u.pci->portals == NULL) {\n+\t\tIDXD_PMD_ERR(\"Error, invalid portal assigned during initialization\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\tnb_wqs = (uint8_t)ret;\n+\n+\t/* set 
up one device for each queue */\n+\tfor (qid = 0; qid < nb_wqs; qid++) {\n+\t\tchar qname[32];\n+\n+\t\t/* add the queue number to each device name */\n+\t\tsnprintf(qname, sizeof(qname), \"%s-q%d\", name, qid);\n+\t\tidxd.qid = qid;\n+\t\tidxd.portal = RTE_PTR_ADD(idxd.u.pci->portals,\n+\t\t\t\tqid * IDXD_PORTAL_SIZE);\n+\t\tif (idxd_is_wq_enabled(&idxd))\n+\t\t\tIDXD_PMD_ERR(\"Error, WQ %u seems enabled\", qid);\n+\t\tret = idxd_dmadev_create(qname, &dev->device,\n+\t\t\t\t&idxd, &idxd_pci_ops);\n+\t\tif (ret != 0) {\n+\t\t\tIDXD_PMD_ERR(\"Failed to create dmadev %s\", name);\n+\t\t\tif (qid == 0) /* if no devices using this, free pci */\n+\t\t\t\tfree(idxd.u.pci);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+idxd_dmadev_destroy(const char *name)\n+{\n+\tint ret;\n+\tuint8_t err_code;\n+\tstruct rte_dmadev *rdev;\n+\tstruct idxd_dmadev *idxd;\n+\n+\tif (!name) {\n+\t\tIDXD_PMD_ERR(\"Invalid device name\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trdev = rte_dmadev_get_device_by_name(name);\n+\tif (!rdev) {\n+\t\tIDXD_PMD_ERR(\"Invalid device name (%s)\", name);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tidxd = rdev->dev_private;\n+\tif (!idxd) {\n+\t\tIDXD_PMD_ERR(\"Error getting dev_private\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* disable the device */\n+\terr_code = idxd_pci_dev_command(idxd, idxd_disable_dev);\n+\tif (err_code) {\n+\t\tIDXD_PMD_ERR(\"Error disabling device: code %#x\", err_code);\n+\t\treturn err_code;\n+\t}\n+\tIDXD_PMD_DEBUG(\"IDXD Device disabled OK\");\n+\n+\t/* free device memory */\n+\tIDXD_PMD_DEBUG(\"Freeing device driver memory\");\n+\trdev->dev_private = NULL;\n+\trte_free(idxd->batch_idx_ring);\n+\trte_free(idxd->desc_ring);\n+\n+\t/* rte_dmadev_close is called by pmd_release */\n+\tret = rte_dmadev_pmd_release(rdev);\n+\tif (ret)\n+\t\tIDXD_PMD_DEBUG(\"Device cleanup failed\");\n+\n+\treturn 0;\n }\n \n static int\n@@ -39,7 +305,7 @@ idxd_dmadev_remove_pci(struct rte_pci_device *dev)\n \tIDXD_PMD_INFO(\"Closing %s on 
NUMA node %d\",\n \t\t\tname, dev->device.numa_node);\n \n-\treturn 0;\n+\treturn idxd_dmadev_destroy(name);\n }\n \n struct rte_pci_driver idxd_pmd_drv_pci = {\n",
    "prefixes": [
        "v3",
        "06/17"
    ]
}