get:
Show a patch.

patch:
Partially update a patch.

put:
Fully update a patch.

GET /api/patches/75061/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 75061,
    "url": "http://patches.dpdk.org/api/patches/75061/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1596138614-17409-11-git-send-email-timothy.mcdaniel@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1596138614-17409-11-git-send-email-timothy.mcdaniel@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1596138614-17409-11-git-send-email-timothy.mcdaniel@intel.com",
    "date": "2020-07-30T19:49:57",
    "name": "[10/27] event/dlb: add PFPMD-specific interface layer to shared code",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "f485717f179c0d83c7c0f537d22148198f5cf6cf",
    "submitter": {
        "id": 826,
        "url": "http://patches.dpdk.org/api/people/826/?format=api",
        "name": "Timothy McDaniel",
        "email": "timothy.mcdaniel@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1596138614-17409-11-git-send-email-timothy.mcdaniel@intel.com/mbox/",
    "series": [
        {
            "id": 11425,
            "url": "http://patches.dpdk.org/api/series/11425/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=11425",
            "date": "2020-07-30T19:49:47",
            "name": "Add Intel DLM PMD to 20.11",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/11425/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/75061/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/75061/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id CB0D9A052B;\n\tThu, 30 Jul 2020 21:54:52 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 92B9E1C0D6;\n\tThu, 30 Jul 2020 21:53:25 +0200 (CEST)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by dpdk.org (Postfix) with ESMTP id C997D1C002\n for <dev@dpdk.org>; Thu, 30 Jul 2020 21:53:12 +0200 (CEST)",
            "from orsmga005.jf.intel.com ([10.7.209.41])\n by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 30 Jul 2020 12:53:11 -0700",
            "from txasoft-yocto.an.intel.com ([10.123.72.192])\n by orsmga005.jf.intel.com with ESMTP; 30 Jul 2020 12:53:10 -0700"
        ],
        "IronPort-SDR": [
            "\n 6Ajxd5bJllDguPWt2p39bGSYRtmVhd6MpJg7yHeCtz/Mzy/u6oTRFn4YKhtscFtBx7PnZ58JYv\n Qsoz59cqb7fA==",
            "\n hB6NwmgCfBfdZW3UKBxk3mHiOGIW3na3kLOcf9XwipLBqwWmeqRsq8dIQ0T57ZCBMxtnOk+T1W\n pdkcuZHLW2pw=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9698\"; a=\"139672305\"",
            "E=Sophos;i=\"5.75,415,1589266800\"; d=\"scan'208\";a=\"139672305\"",
            "E=Sophos;i=\"5.75,415,1589266800\"; d=\"scan'208\";a=\"465378103\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "\"McDaniel, Timothy\" <timothy.mcdaniel@intel.com>",
        "To": "jerinj@marvell.com",
        "Cc": "mattias.ronnblom@ericsson.com, dev@dpdk.org, gage.eads@intel.com,\n harry.van.haaren@intel.com,\n \"McDaniel, Timothy\" <timothy.mcdaniel@intel.com>",
        "Date": "Thu, 30 Jul 2020 14:49:57 -0500",
        "Message-Id": "<1596138614-17409-11-git-send-email-timothy.mcdaniel@intel.com>",
        "X-Mailer": "git-send-email 1.7.10",
        "In-Reply-To": "<1596138614-17409-1-git-send-email-timothy.mcdaniel@intel.com>",
        "References": "<1593232671-5690-0-git-send-email-timothy.mcdaniel@intel.com>\n <1596138614-17409-1-git-send-email-timothy.mcdaniel@intel.com>",
        "Subject": "[dpdk-dev] [PATCH 10/27] event/dlb: add PFPMD-specific interface\n\tlayer to shared code",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: \"McDaniel, Timothy\" <timothy.mcdaniel@intel.com>\n\nSigned-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>\n---\n drivers/event/dlb/Makefile      |    2 +\n drivers/event/dlb/meson.build   |    4 +-\n drivers/event/dlb/pf/dlb_main.c |  614 ++++++++++++++++++++++++++++++\n drivers/event/dlb/pf/dlb_main.h |   54 +++\n drivers/event/dlb/pf/dlb_pf.c   |  782 +++++++++++++++++++++++++++++++++++++++\n 5 files changed, 1455 insertions(+), 1 deletion(-)\n create mode 100644 drivers/event/dlb/pf/dlb_main.c\n create mode 100644 drivers/event/dlb/pf/dlb_main.h\n create mode 100644 drivers/event/dlb/pf/dlb_pf.c",
    "diff": "diff --git a/drivers/event/dlb/Makefile b/drivers/event/dlb/Makefile\nindex f191762..0d27fc3 100644\n--- a/drivers/event/dlb/Makefile\n+++ b/drivers/event/dlb/Makefile\n@@ -20,6 +20,8 @@ LIBABIVER := 1\n EXPORT_MAP := rte_pmd_dlb_event_version.map\n \n # library source files\n+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DLB_EVENTDEV) += pf/dlb_pf.c\n+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DLB_EVENTDEV) += pf/dlb_main.c\n SRCS-$(CONFIG_RTE_LIBRTE_PMD_DLB_EVENTDEV) += pf/base/dlb_resource.c\n \n # export include files\ndiff --git a/drivers/event/dlb/meson.build b/drivers/event/dlb/meson.build\nindex 4fae0eb..1e30b4c 100644\n--- a/drivers/event/dlb/meson.build\n+++ b/drivers/event/dlb/meson.build\n@@ -1,7 +1,9 @@\n # SPDX-License-Identifier: BSD-3-Clause\n # Copyright(c) 2019-2020 Intel Corporation\n \n-sources = files('pf/base/dlb_resource.c'\n+sources = files('pf/dlb_pf.c',\n+\t\t'pf/dlb_main.c',\n+\t\t'pf/base/dlb_resource.c'\n )\n \n deps += ['mbuf', 'mempool', 'ring', 'pci', 'bus_pci']\ndiff --git a/drivers/event/dlb/pf/dlb_main.c b/drivers/event/dlb/pf/dlb_main.c\nnew file mode 100644\nindex 0000000..9c49f94\n--- /dev/null\n+++ b/drivers/event/dlb/pf/dlb_main.c\n@@ -0,0 +1,614 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2016-2020 Intel Corporation\n+ */\n+\n+#include <stdint.h>\n+#include <stdbool.h>\n+#include <stdio.h>\n+#include <errno.h>\n+#include <assert.h>\n+#include <unistd.h>\n+#include <string.h>\n+\n+#include <rte_malloc.h>\n+#include <rte_errno.h>\n+\n+#include \"base/dlb_resource.h\"\n+#include \"base/dlb_osdep.h\"\n+#include \"base/dlb_regs.h\"\n+#include \"../dlb_priv.h\"\n+#include \"../dlb_inline_fns.h\"\n+#include \"../dlb_user.h\"\n+#include \"dlb_main.h\"\n+\n+#define PF_ID_ZERO 0\t/* PF ONLY! */\n+#define NO_OWNER_VF 0\t/* PF ONLY! */\n+#define NOT_VF_REQ false /* PF ONLY! 
*/\n+\n+unsigned int dlb_unregister_timeout_s = DLB_DEFAULT_UNREGISTER_TIMEOUT_S;\n+\n+#define DLB_PCI_CFG_SPACE_SIZE 256\n+#define DLB_PCI_CAP_POINTER 0x34\n+#define DLB_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)\n+#define DLB_PCI_CAP_ID(hdr) ((hdr) & 0xFF)\n+#define DLB_PCI_EXT_CAP_NEXT(hdr) (((hdr) >> 20) & 0xFFC)\n+#define DLB_PCI_EXT_CAP_ID(hdr) ((hdr) & 0xFFFF)\n+#define DLB_PCI_EXT_CAP_ID_ERR 1\n+#define DLB_PCI_ERR_UNCOR_MASK 8\n+#define DLB_PCI_ERR_UNC_UNSUP  0x00100000\n+\n+#define DLB_PCI_EXP_DEVCTL 8\n+#define DLB_PCI_LNKCTL 16\n+#define DLB_PCI_SLTCTL 24\n+#define DLB_PCI_RTCTL 28\n+#define DLB_PCI_EXP_DEVCTL2 40\n+#define DLB_PCI_LNKCTL2 48\n+#define DLB_PCI_SLTCTL2 56\n+#define DLB_PCI_CMD 4\n+#define DLB_PCI_X_CMD 2\n+#define DLB_PCI_EXP_DEVSTA 10\n+#define DLB_PCI_EXP_DEVSTA_TRPND 0x20\n+#define DLB_PCI_EXP_DEVCTL_BCR_FLR 0x8000\n+#define DLB_PCI_PASID_CTRL 6\n+#define DLB_PCI_PASID_CAP 4\n+\n+#define DLB_PCI_CAP_ID_EXP       0x10\n+#define DLB_PCI_CAP_ID_MSIX      0x11\n+#define DLB_PCI_EXT_CAP_ID_PAS   0x1B\n+#define DLB_PCI_EXT_CAP_ID_PRI   0x13\n+#define DLB_PCI_EXT_CAP_ID_ACS   0xD\n+\n+#define DLB_PCI_PASID_CAP_EXEC          0x2\n+#define DLB_PCI_PASID_CAP_PRIV          0x4\n+#define DLB_PCI_PASID_CTRL_ENABLE       0x1\n+#define DLB_PCI_PRI_CTRL_ENABLE         0x1\n+#define DLB_PCI_PRI_ALLOC_REQ           0xC\n+#define DLB_PCI_PRI_CTRL                0x4\n+#define DLB_PCI_MSIX_FLAGS              0x2\n+#define DLB_PCI_MSIX_FLAGS_ENABLE       0x8000\n+#define DLB_PCI_MSIX_FLAGS_MASKALL      0x4000\n+#define DLB_PCI_ERR_ROOT_STATUS         0x30\n+#define DLB_PCI_ERR_COR_STATUS          0x10\n+#define DLB_PCI_ERR_UNCOR_STATUS        0x4\n+#define DLB_PCI_COMMAND_INTX_DISABLE    0x400\n+#define DLB_PCI_ACS_CAP                 0x4\n+#define DLB_PCI_ACS_CTRL                0x6\n+#define DLB_PCI_ACS_SV                  0x1\n+#define DLB_PCI_ACS_RR                  0x4\n+#define DLB_PCI_ACS_CR                  0x8\n+#define DLB_PCI_ACS_UF                 
 0x10\n+#define DLB_PCI_ACS_EC                  0x20\n+\n+static int dlb_pci_find_ext_capability(struct rte_pci_device *pdev, uint32_t id)\n+{\n+\tuint32_t hdr;\n+\tsize_t sz;\n+\tint pos;\n+\n+\tpos = DLB_PCI_CFG_SPACE_SIZE;\n+\tsz = sizeof(hdr);\n+\n+\twhile (pos > 0xFF) {\n+\t\tif (rte_pci_read_config(pdev, &hdr, sz, pos) != (int)sz)\n+\t\t\treturn -1;\n+\n+\t\tif (DLB_PCI_EXT_CAP_ID(hdr) == id)\n+\t\t\treturn pos;\n+\n+\t\tpos = DLB_PCI_EXT_CAP_NEXT(hdr);\n+\t}\n+\n+\treturn -1;\n+}\n+\n+static int dlb_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)\n+{\n+\tuint8_t pos;\n+\tint ret;\n+\tuint16_t hdr;\n+\n+\tret = rte_pci_read_config(pdev, &pos, 1, DLB_PCI_CAP_POINTER);\n+\tpos &= 0xFC;\n+\n+\tif (ret != 1)\n+\t\treturn -1;\n+\n+\twhile (pos > 0x3F) {\n+\t\tret = rte_pci_read_config(pdev, &hdr, 2, pos);\n+\t\tif (ret != 2)\n+\t\t\treturn -1;\n+\n+\t\tif (DLB_PCI_CAP_ID(hdr) == id)\n+\t\t\treturn pos;\n+\n+\t\tif (DLB_PCI_CAP_ID(hdr) == 0xFF)\n+\t\t\treturn -1;\n+\n+\t\tpos = DLB_PCI_CAP_NEXT(hdr);\n+\t}\n+\n+\treturn -1;\n+}\n+\n+static int dlb_mask_ur_err(struct rte_pci_device *pdev)\n+{\n+\tuint32_t mask;\n+\tsize_t sz = sizeof(mask);\n+\tint pos = dlb_pci_find_ext_capability(pdev, DLB_PCI_EXT_CAP_ID_ERR);\n+\n+\tif (pos < 0) {\n+\t\tprintf(\"[%s()] failed to find the aer capability\\n\",\n+\t\t       __func__);\n+\t\treturn pos;\n+\t}\n+\n+\tpos += DLB_PCI_ERR_UNCOR_MASK;\n+\n+\tif (rte_pci_read_config(pdev, &mask, sz, pos) != (int)sz) {\n+\t\tprintf(\"[%s()] Failed to read uncorrectable error mask reg\\n\",\n+\t\t       __func__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Mask Unsupported Request errors */\n+\tmask |= DLB_PCI_ERR_UNC_UNSUP;\n+\n+\tif (rte_pci_write_config(pdev, &mask, sz, pos) != (int)sz) {\n+\t\tprintf(\"[%s()] Failed to write uncorrectable error mask reg at offset %d\\n\",\n+\t\t       __func__, pos);\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+struct dlb_dev *\n+dlb_probe(struct rte_pci_device *pdev)\n+{\n+\tstruct dlb_dev 
*dlb_dev;\n+\tint ret = 0;\n+\n+\tDLB_INFO(dlb_dev, \"probe\\n\");\n+\n+\tdlb_dev = rte_malloc(\"DLB_PF\", sizeof(struct dlb_dev),\n+\t\t\t     RTE_CACHE_LINE_SIZE);\n+\n+\tif (!dlb_dev) {\n+\t\tret = -ENOMEM;\n+\t\tgoto dlb_dev_malloc_fail;\n+\t}\n+\n+\t/* PCI Bus driver has already mapped bar space into process.\n+\t * Save off our IO register and FUNC addresses.\n+\t */\n+\n+\t/* BAR 0 */\n+\tif (pdev->mem_resource[0].addr == NULL) {\n+\t\tDLB_ERR(dlb_dev, \"probe: BAR 0 addr (csr_kva) is NULL\\n\");\n+\t\tret = -EINVAL;\n+\t\tgoto pci_mmap_bad_addr;\n+\t}\n+\tdlb_dev->hw.func_kva = (void *)(uintptr_t)pdev->mem_resource[0].addr;\n+\tdlb_dev->hw.func_phys_addr = pdev->mem_resource[0].phys_addr;\n+\n+\tDLB_INFO(dlb_dev, \"DLB FUNC VA=%p, PA=%p, len=%\"PRIu64\"\\n\",\n+\t\t (void *)dlb_dev->hw.func_kva,\n+\t\t (void *)dlb_dev->hw.func_phys_addr,\n+\t\t pdev->mem_resource[0].len);\n+\n+\t/* BAR 2 */\n+\tif (pdev->mem_resource[2].addr == NULL) {\n+\t\tDLB_ERR(dlb_dev, \"probe: BAR 2 addr (func_kva) is NULL\\n\");\n+\t\tret = -EINVAL;\n+\t\tgoto pci_mmap_bad_addr;\n+\t}\n+\tdlb_dev->hw.csr_kva = (void *)(uintptr_t)pdev->mem_resource[2].addr;\n+\tdlb_dev->hw.csr_phys_addr = pdev->mem_resource[2].phys_addr;\n+\n+\tDLB_INFO(dlb_dev, \"DLB CSR VA=%p, PA=%p, len=%\"PRIu64\"\\n\",\n+\t\t (void *)dlb_dev->hw.csr_kva,\n+\t\t (void *)dlb_dev->hw.csr_phys_addr,\n+\t\t pdev->mem_resource[2].len);\n+\n+\tdlb_dev->pdev = pdev;\n+\n+\tret = dlb_pf_reset(dlb_dev);\n+\tif (ret)\n+\t\tgoto dlb_reset_fail;\n+\n+\t/* DLB incorrectly sends URs in response to certain messages. 
Mask UR\n+\t * errors to prevent these from being propagated to the MCA.\n+\t */\n+\tret = dlb_mask_ur_err(pdev);\n+\tif (ret)\n+\t\tgoto mask_ur_err_fail;\n+\n+\tret = dlb_pf_init_driver_state(dlb_dev);\n+\tif (ret)\n+\t\tgoto init_driver_state_fail;\n+\n+\tret = dlb_resource_init(&dlb_dev->hw);\n+\tif (ret)\n+\t\tgoto resource_init_fail;\n+\n+\tdlb_dev->revision = os_get_dev_revision(&dlb_dev->hw);\n+\n+\tdlb_pf_init_hardware(dlb_dev);\n+\n+\treturn dlb_dev;\n+\n+resource_init_fail:\n+\tdlb_resource_free(&dlb_dev->hw);\n+init_driver_state_fail:\n+mask_ur_err_fail:\n+dlb_reset_fail:\n+pci_mmap_bad_addr:\n+\trte_free(dlb_dev);\n+dlb_dev_malloc_fail:\n+\trte_errno = ret;\n+\treturn NULL;\n+}\n+\n+int\n+dlb_pf_reset(struct dlb_dev *dlb_dev)\n+{\n+\tuint16_t devsta_busy_word, devctl_word, pasid_ctrl_word, pasid_features;\n+\tint msix_cap_offset, err_cap_offset, acs_cap_offset, wait_count;\n+\tuint16_t dev_ctl_word, dev_ctl2_word, lnk_word, lnk_word2;\n+\tint pcie_cap_offset, pasid_cap_offset, pri_cap_offset;\n+\tuint16_t rt_ctl_word, pri_reqs_dword,  pri_ctrl_word;\n+\tstruct rte_pci_device *pdev = dlb_dev->pdev;\n+\tuint16_t slt_word, slt_word2, cmd;\n+\tint ret = 0, i = 0;\n+\tuint32_t dword[16];\n+\toff_t off;\n+\n+\t/* Save PCI config state */\n+\n+\tfor (i = 0; i < 16; i++) {\n+\t\tif (rte_pci_read_config(pdev, &dword[i], 4, i * 4) != 4)\n+\t\t\treturn ret;\n+\t}\n+\n+\tpcie_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_EXP);\n+\n+\tif (pcie_cap_offset < 0) {\n+\t\tprintf(\"[%s()] failed to find the pcie capability\\n\",\n+\t\t       __func__);\n+\t\treturn pcie_cap_offset;\n+\t}\n+\n+\toff = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;\n+\tif (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)\n+\t\tdev_ctl_word = 0;\n+\n+\toff = pcie_cap_offset + DLB_PCI_LNKCTL;\n+\tif (rte_pci_read_config(pdev, &lnk_word, 2, off) != 2)\n+\t\tlnk_word = 0;\n+\n+\toff = pcie_cap_offset + DLB_PCI_SLTCTL;\n+\tif (rte_pci_read_config(pdev, &slt_word, 2, off) != 
2)\n+\t\tslt_word = 0;\n+\n+\toff = pcie_cap_offset + DLB_PCI_RTCTL;\n+\tif (rte_pci_read_config(pdev, &rt_ctl_word, 2, off) != 2)\n+\t\trt_ctl_word = 0;\n+\n+\toff = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;\n+\tif (rte_pci_read_config(pdev, &dev_ctl2_word, 2, off) != 2)\n+\t\tdev_ctl2_word = 0;\n+\n+\toff = pcie_cap_offset + DLB_PCI_LNKCTL2;\n+\tif (rte_pci_read_config(pdev, &lnk_word2, 2, off) != 2)\n+\t\tlnk_word2 = 0;\n+\n+\toff = pcie_cap_offset + DLB_PCI_SLTCTL2;\n+\tif (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2)\n+\t\tslt_word2 = 0;\n+\n+\tpri_cap_offset = dlb_pci_find_ext_capability(pdev,\n+\t\t\t\t\t\t     DLB_PCI_EXT_CAP_ID_PRI);\n+\tif (pri_cap_offset >= 0) {\n+\t\toff = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;\n+\t\tif (rte_pci_read_config(pdev, &pri_reqs_dword, 4, off) != 4)\n+\t\t\tpri_reqs_dword = 0;\n+\t}\n+\n+\t/* clear the PCI command register before issuing the FLR */\n+\n+\toff = DLB_PCI_CMD;\n+\tcmd = 0;\n+\tif (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {\n+\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t       __func__, (int)off);\n+\t\treturn -1;\n+\t}\n+\n+\t/* issue the FLR */\n+\tfor (wait_count = 0; wait_count < 4; wait_count++) {\n+\t\tint sleep_time;\n+\n+\t\toff = pcie_cap_offset + DLB_PCI_EXP_DEVSTA;\n+\t\tret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off);\n+\t\tif (ret != 2) {\n+\t\t\tprintf(\"[%s()] failed to read the pci device status\\n\",\n+\t\t\t       __func__);\n+\t\t\treturn ret;\n+\t\t}\n+\n+\t\tif (!(devsta_busy_word & DLB_PCI_EXP_DEVSTA_TRPND))\n+\t\t\tbreak;\n+\n+\t\tsleep_time = (1 << (wait_count)) * 100;\n+\t\trte_delay_ms(sleep_time);\n+\t}\n+\n+\tif (wait_count == 4) {\n+\t\tprintf(\"[%s()] wait for pci pending transactions timed out\\n\",\n+\t\t       __func__);\n+\t\treturn -1;\n+\t}\n+\n+\toff = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;\n+\tret = rte_pci_read_config(pdev, &devctl_word, 2, off);\n+\tif (ret != 2) {\n+\t\tprintf(\"[%s()] failed to read the pcie device 
control\\n\",\n+\t\t       __func__);\n+\t\treturn ret;\n+\t}\n+\n+\tdevctl_word |= DLB_PCI_EXP_DEVCTL_BCR_FLR;\n+\n+\tif (rte_pci_write_config(pdev, &devctl_word, 2, off) != 2) {\n+\t\tprintf(\"[%s()] failed to write the pcie device control at offset %d\\n\",\n+\t\t       __func__, (int)off);\n+\t\treturn -1;\n+\t}\n+\n+\trte_delay_ms(100);\n+\n+\t/* Restore PCI config state */\n+\n+\tif (pcie_cap_offset >= 0) {\n+\t\toff = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;\n+\t\tif (rte_pci_write_config(pdev, &dev_ctl_word, 2, off) != 2) {\n+\t\t\tprintf(\"[%s()] failed to write the pcie device control at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\toff = pcie_cap_offset + DLB_PCI_LNKCTL;\n+\t\tif (rte_pci_write_config(pdev, &lnk_word, 2, off) != 2) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\toff = pcie_cap_offset + DLB_PCI_SLTCTL;\n+\t\tif (rte_pci_write_config(pdev, &slt_word, 2, off) != 2) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\toff = pcie_cap_offset + DLB_PCI_RTCTL;\n+\t\tif (rte_pci_write_config(pdev, &rt_ctl_word, 2, off) != 2) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\toff = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;\n+\t\tif (rte_pci_write_config(pdev, &dev_ctl2_word, 2, off) != 2) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\toff = pcie_cap_offset + DLB_PCI_LNKCTL2;\n+\t\tif (rte_pci_write_config(pdev, &lnk_word2, 2, off) != 2) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\toff = pcie_cap_offset + 
DLB_PCI_SLTCTL2;\n+\t\tif (rte_pci_write_config(pdev, &slt_word2, 2, off) != 2) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tpasid_cap_offset = dlb_pci_find_ext_capability(pdev,\n+\t\t\t\t\t\t       DLB_PCI_EXT_CAP_ID_PAS);\n+\tif (pasid_cap_offset >= 0) {\n+\t\toff = pasid_cap_offset + DLB_PCI_PASID_CAP;\n+\t\tif (rte_pci_read_config(pdev, &pasid_features, 2, off) != 2)\n+\t\t\tpasid_features = 0;\n+\n+\t\tpasid_features &= DLB_PCI_PASID_CAP_EXEC;\n+\t\tpasid_features &= DLB_PCI_PASID_CAP_PRIV;\n+\t\tpasid_ctrl_word = DLB_PCI_PASID_CTRL_ENABLE | pasid_features;\n+\n+\t\toff = pasid_cap_offset + DLB_PCI_PASID_CTRL;\n+\t\tif (rte_pci_write_config(pdev, &pasid_ctrl_word, 2, off) != 2) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tif (pri_cap_offset >= 0) {\n+\t\tpri_ctrl_word = DLB_PCI_PRI_CTRL_ENABLE;\n+\n+\t\toff = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;\n+\t\tif (rte_pci_write_config(pdev, &pri_reqs_dword, 4, off) != 4) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\toff = pri_cap_offset + DLB_PCI_PRI_CTRL;\n+\t\tif (rte_pci_write_config(pdev, &pri_ctrl_word, 2, off) != 2) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\terr_cap_offset = dlb_pci_find_ext_capability(pdev,\n+\t\t\t\t\t\t     DLB_PCI_EXT_CAP_ID_ERR);\n+\tif (err_cap_offset >= 0) {\n+\t\tuint32_t tmp;\n+\n+\t\toff = err_cap_offset + DLB_PCI_ERR_ROOT_STATUS;\n+\t\tif (rte_pci_read_config(pdev, &tmp, 4, off) != 4)\n+\t\t\ttmp = 0;\n+\n+\t\tif (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       
__func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\toff = err_cap_offset + DLB_PCI_ERR_COR_STATUS;\n+\t\tif (rte_pci_read_config(pdev, &tmp, 4, off) != 4)\n+\t\t\ttmp = 0;\n+\n+\t\tif (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\toff = err_cap_offset + DLB_PCI_ERR_UNCOR_STATUS;\n+\t\tif (rte_pci_read_config(pdev, &tmp, 4, off) != 4)\n+\t\t\ttmp = 0;\n+\n+\t\tif (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tfor (i = 16; i > 0; i--) {\n+\t\toff = (i - 1) * 4;\n+\t\tif (rte_pci_write_config(pdev, &dword[i - 1], 4, off) != 4) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\toff = DLB_PCI_CMD;\n+\tif (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {\n+\t\tcmd &= ~DLB_PCI_COMMAND_INTX_DISABLE;\n+\t\tif (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space\\n\",\n+\t\t\t       __func__);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tmsix_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_MSIX);\n+\tif (msix_cap_offset >= 0) {\n+\t\toff = msix_cap_offset + DLB_PCI_MSIX_FLAGS;\n+\t\tif (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {\n+\t\t\tcmd |= DLB_PCI_MSIX_FLAGS_ENABLE;\n+\t\t\tcmd |= DLB_PCI_MSIX_FLAGS_MASKALL;\n+\t\t\tif (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {\n+\t\t\t\tprintf(\"[%s()] failed to write msix flags\\n\",\n+\t\t\t\t       __func__);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\n+\t\toff = msix_cap_offset + DLB_PCI_MSIX_FLAGS;\n+\t\tif (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {\n+\t\t\tcmd &= ~DLB_PCI_MSIX_FLAGS_MASKALL;\n+\t\t\tif (rte_pci_write_config(pdev, &cmd, 2, off) != 2) 
{\n+\t\t\t\tprintf(\"[%s()] failed to write msix flags\\n\",\n+\t\t\t\t       __func__);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tacs_cap_offset = dlb_pci_find_ext_capability(pdev,\n+\t\t\t\t\t\t     DLB_PCI_EXT_CAP_ID_ACS);\n+\tif (acs_cap_offset >= 0) {\n+\t\tuint16_t acs_cap, acs_ctrl, acs_mask;\n+\t\toff = acs_cap_offset + DLB_PCI_ACS_CAP;\n+\t\tif (rte_pci_read_config(pdev, &acs_cap, 2, off) != 2)\n+\t\t\tacs_cap = 0;\n+\n+\t\toff = acs_cap_offset + DLB_PCI_ACS_CTRL;\n+\t\tif (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)\n+\t\t\tacs_ctrl = 0;\n+\n+\t\tacs_mask = DLB_PCI_ACS_SV | DLB_PCI_ACS_RR;\n+\t\tacs_mask |= (DLB_PCI_ACS_CR | DLB_PCI_ACS_UF);\n+\t\tacs_ctrl |= (acs_cap & acs_mask);\n+\n+\t\tif (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\toff = acs_cap_offset + DLB_PCI_ACS_CTRL;\n+\t\tif (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)\n+\t\t\tacs_ctrl = 0;\n+\n+\t\tacs_mask = DLB_PCI_ACS_RR | DLB_PCI_ACS_CR | DLB_PCI_ACS_EC;\n+\t\tacs_ctrl &= ~acs_mask;\n+\n+\t\toff = acs_cap_offset + DLB_PCI_ACS_CTRL;\n+\t\tif (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {\n+\t\t\tprintf(\"[%s()] failed to write pci config space at offset %d\\n\",\n+\t\t\t       __func__, (int)off);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/*******************************/\n+/****** Driver management ******/\n+/*******************************/\n+\n+int\n+dlb_pf_init_driver_state(struct dlb_dev *dlb_dev)\n+{\n+\tif (rte_cpu_get_flag_enabled(RTE_CPUFLAG_MOVDIR64B))\n+\t\tdlb_dev->enqueue_four = dlb_movdir64b;\n+\telse\n+\t\tdlb_dev->enqueue_four = dlb_movntdq;\n+\n+\t/* Initialize software state */\n+\trte_spinlock_init(&dlb_dev->resource_mutex);\n+\trte_spinlock_init(&dlb_dev->measurement_lock);\n+\n+\treturn 0;\n+}\n+\n+void\n+dlb_pf_init_hardware(struct dlb_dev 
*dlb_dev)\n+{\n+\tdlb_disable_dp_vasr_feature(&dlb_dev->hw);\n+\n+\tdlb_enable_excess_tokens_alarm(&dlb_dev->hw);\n+\n+\tif (dlb_dev->revision >= DLB_REV_B0) {\n+\t\tdlb_hw_enable_sparse_ldb_cq_mode(&dlb_dev->hw);\n+\t\tdlb_hw_enable_sparse_dir_cq_mode(&dlb_dev->hw);\n+\t}\n+\n+\tif (dlb_dev->revision >= DLB_REV_B0) {\n+\t\tdlb_hw_disable_pf_to_vf_isr_pend_err(&dlb_dev->hw);\n+\t\tdlb_hw_disable_vf_to_pf_isr_pend_err(&dlb_dev->hw);\n+\t}\n+}\ndiff --git a/drivers/event/dlb/pf/dlb_main.h b/drivers/event/dlb/pf/dlb_main.h\nnew file mode 100644\nindex 0000000..5fbfcea\n--- /dev/null\n+++ b/drivers/event/dlb/pf/dlb_main.h\n@@ -0,0 +1,54 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2016-2020 Intel Corporation\n+ */\n+\n+#ifndef __DLB_MAIN_H\n+#define __DLB_MAIN_H\n+\n+#include <rte_debug.h>\n+#include <rte_log.h>\n+#include <rte_spinlock.h>\n+#include <rte_pci.h>\n+#include <rte_bus_pci.h>\n+\n+#ifndef PAGE_SIZE\n+#define PAGE_SIZE (sysconf(_SC_PAGESIZE))\n+#endif\n+\n+#include \"base/dlb_hw_types.h\"\n+#include \"../dlb_user.h\"\n+\n+#define DLB_DEFAULT_UNREGISTER_TIMEOUT_S 5\n+\n+struct dlb_dev;\n+\n+struct dlb_dev {\n+\tstruct rte_pci_device *pdev;\n+\tstruct dlb_hw hw;\n+\t/* struct list_head list; */\n+\tstruct device *dlb_device;\n+\t/* The enqueue_four function enqueues four HCWs (one cache-line worth)\n+\t * to the DLB, using whichever mechanism is supported by the platform\n+\t * on which this driver is running.\n+\t */\n+\tvoid (*enqueue_four)(void *qe4, void *pp_addr);\n+\tbool domain_reset_failed;\n+\t/* The resource mutex serializes access to driver data structures and\n+\t * hardware registers.\n+\t */\n+\trte_spinlock_t resource_mutex;\n+\trte_spinlock_t measurement_lock;\n+\tbool worker_launched;\n+\tu8 revision;\n+};\n+\n+struct dlb_dev *dlb_probe(struct rte_pci_device *pdev);\n+void dlb_reset_done(struct dlb_dev *dlb_dev);\n+\n+/* pf_ops */\n+int dlb_pf_init_driver_state(struct dlb_dev *dev);\n+void dlb_pf_free_driver_state(struct 
dlb_dev *dev);\n+void dlb_pf_init_hardware(struct dlb_dev *dev);\n+int dlb_pf_reset(struct dlb_dev *dlb_dev);\n+\n+#endif /* __DLB_MAIN_H */\ndiff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c\nnew file mode 100644\nindex 0000000..329497d\n--- /dev/null\n+++ b/drivers/event/dlb/pf/dlb_pf.c\n@@ -0,0 +1,782 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2016-2020 Intel Corporation\n+ */\n+\n+#include <stdint.h>\n+#include <stdbool.h>\n+#include <stdio.h>\n+#include <sys/mman.h>\n+#include <sys/fcntl.h>\n+#include <sys/time.h>\n+#include <errno.h>\n+#include <assert.h>\n+#include <unistd.h>\n+#include <string.h>\n+#include <rte_debug.h>\n+#include <rte_log.h>\n+#include <rte_dev.h>\n+#include <rte_devargs.h>\n+#include <rte_mbuf.h>\n+#include <rte_ring.h>\n+#include <rte_errno.h>\n+#include <rte_kvargs.h>\n+#include <rte_malloc.h>\n+#include <rte_cycles.h>\n+#include <rte_io.h>\n+#include <rte_pci.h>\n+#include <rte_bus_pci.h>\n+#include <rte_eventdev.h>\n+#include <rte_eventdev_pmd.h>\n+#include <rte_eventdev_pmd_pci.h>\n+#include <rte_memory.h>\n+#include <rte_string_fns.h>\n+\n+#include \"../dlb_priv.h\"\n+#include \"../dlb_iface.h\"\n+#include \"../dlb_inline_fns.h\"\n+#include \"dlb_main.h\"\n+#include \"base/dlb_hw_types.h\"\n+#include \"base/dlb_osdep.h\"\n+#include \"base/dlb_resource.h\"\n+\n+extern struct dlb_dev *dlb_probe(struct rte_pci_device *pdev);\n+extern struct process_local_port_data dlb_port[][NUM_DLB_PORT_TYPES];\n+\n+static const char *event_dlb_pf_name = EVDEV_DLB_NAME_PMD_STR;\n+\n+static void\n+dlb_pf_low_level_io_init(struct dlb_eventdev *dlb __rte_unused)\n+{\n+\tint i;\n+\n+\t/* Addresses will be initialized at port create */\n+\tfor (i = 0; i < DLB_MAX_NUM_PORTS; i++) {\n+\t\t/* First directed ports */\n+\n+\t\t/* producer port */\n+\t\tdlb_port[i][DLB_DIR].pp_addr = NULL;\n+\n+\t\t/* popcount */\n+\t\tdlb_port[i][DLB_DIR].ldb_popcount = NULL;\n+\t\tdlb_port[i][DLB_DIR].dir_popcount = 
NULL;\n+\n+\t\t/* consumer queue */\n+\t\tdlb_port[i][DLB_DIR].cq_base = NULL;\n+\t\tdlb_port[i][DLB_DIR].mmaped = true;\n+\n+\t\t/* Now load balanced ports */\n+\n+\t\t/* producer port */\n+\t\tdlb_port[i][DLB_LDB].pp_addr = NULL;\n+\n+\t\t/* popcount */\n+\t\tdlb_port[i][DLB_LDB].ldb_popcount = NULL;\n+\t\tdlb_port[i][DLB_LDB].dir_popcount = NULL;\n+\n+\t\t/* consumer queue */\n+\t\tdlb_port[i][DLB_LDB].cq_base = NULL;\n+\t\tdlb_port[i][DLB_LDB].mmaped = true;\n+\t}\n+}\n+\n+static int\n+dlb_pf_open(struct dlb_hw_dev *handle, const char *name)\n+{\n+\tRTE_SET_USED(handle);\n+\tRTE_SET_USED(name);\n+\n+\treturn 0;\n+}\n+\n+static void\n+dlb_pf_domain_close(struct dlb_eventdev *dlb)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)dlb->qm_instance.pf_dev;\n+\tint ret;\n+\n+\tret = dlb_reset_domain(&dlb_dev->hw,\n+\t\t\t       dlb->qm_instance.domain_id,\n+\t\t\t       false,\n+\t\t\t       0);\n+\tif (ret)\n+\t\tDLB_LOG_ERR(\"dlb_pf_reset_domain err %d\", ret);\n+}\n+\n+static int\n+dlb_pf_get_device_version(struct dlb_hw_dev *handle,\n+\t\t\t  uint8_t *revision)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\n+\t*revision = dlb_dev->revision;\n+\n+\treturn 0;\n+}\n+\n+#define PF_ID_ZERO 0\t/* PF ONLY! */\n+#define NO_OWNER_VF 0\t/* PF ONLY! */\n+#define NOT_VF_REQ false /* PF ONLY! 
*/\n+\n+static int\n+dlb_pf_get_num_resources(struct dlb_hw_dev *handle,\n+\t\t\t struct dlb_get_num_resources_args *rsrcs)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\n+\treturn dlb_hw_get_num_resources(&dlb_dev->hw, rsrcs, false, 0);\n+}\n+\n+static int\n+dlb_pf_sched_domain_create(struct dlb_hw_dev *handle,\n+\t\t\t   struct dlb_create_sched_domain_args *arg)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tif (dlb_dev->domain_reset_failed) {\n+\t\tresponse.status = DLB_ST_DOMAIN_RESET_FAILED;\n+\t\tret = -EINVAL;\n+\t\tgoto done;\n+\t}\n+\n+\tret = dlb_hw_create_sched_domain(&dlb_dev->hw, arg, &response,\n+\t\t\t\t\t NOT_VF_REQ, PF_ID_ZERO);\n+\tif (ret)\n+\t\tgoto done;\n+\n+done:\n+\n+\t*(struct dlb_cmd_response *)arg->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_ldb_credit_pool_create(struct dlb_hw_dev *handle,\n+\t\t\t      struct dlb_create_ldb_pool_args *cfg)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = dlb_hw_create_ldb_pool(&dlb_dev->hw,\n+\t\t\t\t     handle->domain_id,\n+\t\t\t\t     cfg,\n+\t\t\t\t     &response,\n+\t\t\t\t     NOT_VF_REQ,\n+\t\t\t\t     PF_ID_ZERO);\n+\n+\t*(struct dlb_cmd_response *)cfg->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_dir_credit_pool_create(struct dlb_hw_dev *handle,\n+\t\t\t      struct dlb_create_dir_pool_args *cfg)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB_INFO(dev->dlb_device, 
\"Entering %s()\\n\", __func__);\n+\n+\tret = dlb_hw_create_dir_pool(&dlb_dev->hw,\n+\t\t\t\t     handle->domain_id,\n+\t\t\t\t     cfg,\n+\t\t\t\t     &response,\n+\t\t\t\t     NOT_VF_REQ,\n+\t\t\t\t     PF_ID_ZERO);\n+\n+\t*(struct dlb_cmd_response *)cfg->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_ldb_queue_create(struct dlb_hw_dev *handle,\n+\t\t\tstruct dlb_create_ldb_queue_args *cfg)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = dlb_hw_create_ldb_queue(&dlb_dev->hw,\n+\t\t\t\t      handle->domain_id,\n+\t\t\t\t      cfg,\n+\t\t\t\t      &response,\n+\t\t\t\t      NOT_VF_REQ,\n+\t\t\t\t      PF_ID_ZERO);\n+\n+\t*(struct dlb_cmd_response *)cfg->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_dir_queue_create(struct dlb_hw_dev *handle,\n+\t\t\tstruct dlb_create_dir_queue_args *cfg)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = dlb_hw_create_dir_queue(&dlb_dev->hw,\n+\t\t\t\t      handle->domain_id,\n+\t\t\t\t      cfg,\n+\t\t\t\t      &response,\n+\t\t\t\t      NOT_VF_REQ,\n+\t\t\t\t      PF_ID_ZERO);\n+\n+\t*(struct dlb_cmd_response *)cfg->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static void *\n+dlb_alloc_coherent_aligned(rte_iova_t *phys, size_t size, int align)\n+{\n+\tconst struct rte_memzone *mz;\n+\tchar mz_name[RTE_MEMZONE_NAMESIZE];\n+\tuint32_t core_id = rte_lcore_id();\n+\tunsigned int socket_id;\n+\n+\tsnprintf(mz_name, sizeof(mz_name) - 1, 
\"%lx\",\n+\t\t (unsigned long)rte_get_timer_cycles());\n+\tif (core_id == (unsigned int)LCORE_ID_ANY)\n+\t\tcore_id = rte_get_master_lcore();\n+\tsocket_id = rte_lcore_to_socket_id(core_id);\n+\tmz = rte_memzone_reserve_aligned(mz_name, size, socket_id,\n+\t\t\t\t\t RTE_MEMZONE_IOVA_CONTIG, align);\n+\tif (!mz) {\n+\t\trte_panic(\"Unable to allocate DMA memory of size %zu bytes - %s\\n\",\n+\t\t\t  size, rte_strerror(rte_errno));\n+\t\t*phys = 0;\n+\t\treturn NULL;\n+\t}\n+\t*phys = mz->iova;\n+\treturn mz->addr;\n+}\n+\n+static int\n+dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,\n+\t\t       struct dlb_create_ldb_port_args *cfg,\n+\t\t       enum dlb_cq_poll_modes poll_mode)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\tuint8_t *port_base;\n+\tint alloc_sz, qe_sz, cq_alloc_depth;\n+\trte_iova_t pp_dma_base;\n+\trte_iova_t pc_dma_base;\n+\trte_iova_t cq_dma_base;\n+\tint is_dir = false;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tif (poll_mode == DLB_CQ_POLL_MODE_STD)\n+\t\tqe_sz = sizeof(struct dlb_dequeue_qe);\n+\telse\n+\t\tqe_sz = RTE_CACHE_LINE_SIZE;\n+\n+\t/* The hardware always uses a CQ depth of at least\n+\t * DLB_MIN_HARDWARE_CQ_DEPTH, even though from the user\n+\t * perspective we support a depth as low as 1 for LDB ports.\n+\t */\n+\tcq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB_MIN_HARDWARE_CQ_DEPTH);\n+\n+\t/* Calculate the port memory required, including two cache lines for\n+\t * credit pop counts. 
Round up to the nearest cache line.\n+\t */\n+\talloc_sz = 2 * RTE_CACHE_LINE_SIZE + cq_alloc_depth * qe_sz;\n+\talloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);\n+\n+\tport_base = dlb_alloc_coherent_aligned(&pc_dma_base,\n+\t\t\t\t\t       alloc_sz,\n+\t\t\t\t\t       PAGE_SIZE);\n+\tif (port_base == NULL)\n+\t\treturn -ENOMEM;\n+\n+\t/* Lock the page in memory */\n+\tret = rte_mem_lock_page(port_base);\n+\tif (ret < 0)\n+\t\trte_panic(\"dlb pf pmd could not lock page for device i/o\\n\");\n+\n+\tmemset(port_base, 0, alloc_sz);\n+\tcq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));\n+\n+\tret = dlb_hw_create_ldb_port(&dlb_dev->hw,\n+\t\t\t\t     handle->domain_id,\n+\t\t\t\t     cfg,\n+\t\t\t\t     pc_dma_base,\n+\t\t\t\t     cq_dma_base,\n+\t\t\t\t     &response,\n+\t\t\t\t     NOT_VF_REQ,\n+\t\t\t\t     PF_ID_ZERO);\n+\tif (ret)\n+\t\tgoto create_port_err;\n+\n+\tpp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);\n+\tdlb_port[response.id][DLB_LDB].pp_addr =\n+\t\t(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));\n+\n+\tdlb_port[response.id][DLB_LDB].cq_base =\n+\t\t(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));\n+\n+\tdlb_port[response.id][DLB_LDB].ldb_popcount =\n+\t\t(void *)(uintptr_t)port_base;\n+\tdlb_port[response.id][DLB_LDB].dir_popcount = (void *)(uintptr_t)\n+\t\t(port_base + RTE_CACHE_LINE_SIZE);\n+\n+\t*(struct dlb_cmd_response *)cfg->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+create_port_err:\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_dir_port_create(struct dlb_hw_dev *handle,\n+\t\t       struct dlb_create_dir_port_args *cfg,\n+\t\t       enum dlb_cq_poll_modes poll_mode)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\tuint8_t *port_base;\n+\tint alloc_sz, qe_sz;\n+\trte_iova_t pp_dma_base;\n+\trte_iova_t pc_dma_base;\n+\trte_iova_t 
cq_dma_base;\n+\tint is_dir = true;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tif (poll_mode == DLB_CQ_POLL_MODE_STD)\n+\t\tqe_sz = sizeof(struct dlb_dequeue_qe);\n+\telse\n+\t\tqe_sz = RTE_CACHE_LINE_SIZE;\n+\n+\t/* Calculate the port memory required, including two cache lines for\n+\t * credit pop counts. Round up to the nearest cache line.\n+\t */\n+\talloc_sz = 2 * RTE_CACHE_LINE_SIZE + cfg->cq_depth * qe_sz;\n+\talloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);\n+\n+\tport_base = dlb_alloc_coherent_aligned(&pc_dma_base,\n+\t\t\t\t\t       alloc_sz,\n+\t\t\t\t\t       PAGE_SIZE);\n+\tif (port_base == NULL)\n+\t\treturn -ENOMEM;\n+\n+\t/* Lock the page in memory */\n+\tret = rte_mem_lock_page(port_base);\n+\tif (ret < 0)\n+\t\trte_panic(\"dlb pf pmd could not lock page for device i/o\\n\");\n+\n+\tmemset(port_base, 0, alloc_sz);\n+\tcq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));\n+\n+\tret = dlb_hw_create_dir_port(&dlb_dev->hw,\n+\t\t\t\t     handle->domain_id,\n+\t\t\t\t     cfg,\n+\t\t\t\t     pc_dma_base,\n+\t\t\t\t     cq_dma_base,\n+\t\t\t\t     &response,\n+\t\t\t\t     NOT_VF_REQ,\n+\t\t\t\t     PF_ID_ZERO);\n+\tif (ret)\n+\t\tgoto create_port_err;\n+\n+\tpp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);\n+\tdlb_port[response.id][DLB_DIR].pp_addr =\n+\t\t(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));\n+\n+\tdlb_port[response.id][DLB_DIR].cq_base =\n+\t\t(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));\n+\n+\tdlb_port[response.id][DLB_DIR].ldb_popcount =\n+\t\t(void *)(uintptr_t)port_base;\n+\tdlb_port[response.id][DLB_DIR].dir_popcount = (void *)(uintptr_t)\n+\t\t(port_base + RTE_CACHE_LINE_SIZE);\n+\n+\t*(struct dlb_cmd_response *)cfg->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+create_port_err:\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_map_qid(struct dlb_hw_dev *handle,\n+\t       struct 
dlb_map_qid_args *cfg)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = dlb_hw_map_qid(&dlb_dev->hw,\n+\t\t\t     handle->domain_id,\n+\t\t\t     cfg,\n+\t\t\t     &response,\n+\t\t\t     NOT_VF_REQ,\n+\t\t\t     PF_ID_ZERO);\n+\n+\t*(struct dlb_cmd_response *)cfg->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_unmap_qid(struct dlb_hw_dev *handle,\n+\t\t struct dlb_unmap_qid_args *cfg)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = dlb_hw_unmap_qid(&dlb_dev->hw,\n+\t\t\t       handle->domain_id,\n+\t\t\t       cfg,\n+\t\t\t       &response,\n+\t\t\t       NOT_VF_REQ,\n+\t\t\t       PF_ID_ZERO);\n+\n+\t*(struct dlb_cmd_response *)cfg->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_sched_domain_start(struct dlb_hw_dev *handle,\n+\t\t\t  struct dlb_start_domain_args *cfg)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = dlb_hw_start_domain(&dlb_dev->hw,\n+\t\t\t\t  handle->domain_id,\n+\t\t\t\t  cfg,\n+\t\t\t\t  &response,\n+\t\t\t\t  NOT_VF_REQ,\n+\t\t\t\t  PF_ID_ZERO);\n+\n+\t*(struct dlb_cmd_response *)cfg->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_pending_port_unmaps(struct dlb_hw_dev *handle,\n+\t\t\t   struct dlb_pending_port_unmaps_args *args)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev 
*)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = dlb_hw_pending_port_unmaps(&dlb_dev->hw,\n+\t\t\t\t\t handle->domain_id,\n+\t\t\t\t\t args,\n+\t\t\t\t\t &response,\n+\t\t\t\t\t NOT_VF_REQ,\n+\t\t\t\t\t PF_ID_ZERO);\n+\n+\t*(struct dlb_cmd_response *)args->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_get_ldb_queue_depth(struct dlb_hw_dev *handle,\n+\t\t\t   struct dlb_get_ldb_queue_depth_args *args)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = dlb_hw_get_ldb_queue_depth(&dlb_dev->hw,\n+\t\t\t\t\t handle->domain_id,\n+\t\t\t\t\t args,\n+\t\t\t\t\t &response,\n+\t\t\t\t\t NOT_VF_REQ,\n+\t\t\t\t\t PF_ID_ZERO);\n+\n+\t*(struct dlb_cmd_response *)args->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_get_dir_queue_depth(struct dlb_hw_dev *handle,\n+\t\t\t   struct dlb_get_dir_queue_depth_args *args)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret = 0;\n+\n+\tDLB_INFO(dev->dlb_device, \"Entering %s()\\n\", __func__);\n+\n+\tret = dlb_hw_get_dir_queue_depth(&dlb_dev->hw,\n+\t\t\t\t\t handle->domain_id,\n+\t\t\t\t\t args,\n+\t\t\t\t\t &response,\n+\t\t\t\t\t NOT_VF_REQ,\n+\t\t\t\t\t PF_ID_ZERO);\n+\n+\t*(struct dlb_cmd_response *)args->response = response;\n+\n+\tDLB_INFO(dev->dlb_device, \"Exiting %s() with ret=%d\\n\", __func__, ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_get_cq_poll_mode(struct dlb_hw_dev *handle,\n+\t\t\tenum dlb_cq_poll_modes *mode)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev 
*)handle->pf_dev;\n+\n+\tif (dlb_dev->revision >= DLB_REV_B0)\n+\t\t*mode = DLB_CQ_POLL_MODE_SPARSE;\n+\telse\n+\t\t*mode = DLB_CQ_POLL_MODE_STD;\n+\n+\treturn 0;\n+}\n+\n+static int\n+dlb_pf_get_sn_allocation(struct dlb_hw_dev *handle,\n+\t\t\t struct dlb_get_sn_allocation_args *args)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tret = dlb_get_group_sequence_numbers(&dlb_dev->hw, args->group);\n+\n+\tresponse.id = ret;\n+\tresponse.status = 0;\n+\n+\t*(struct dlb_cmd_response *)args->response = response;\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_set_sn_allocation(struct dlb_hw_dev *handle,\n+\t\t\t struct dlb_set_sn_allocation_args *args)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tret = dlb_set_group_sequence_numbers(&dlb_dev->hw, args->group,\n+\t\t\t\t\t     args->num);\n+\n+\tresponse.status = 0;\n+\n+\t*(struct dlb_cmd_response *)args->response = response;\n+\n+\treturn ret;\n+}\n+\n+static int\n+dlb_pf_get_sn_occupancy(struct dlb_hw_dev *handle,\n+\t\t\tstruct dlb_get_sn_occupancy_args *args)\n+{\n+\tstruct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;\n+\tstruct dlb_cmd_response response = {0};\n+\tint ret;\n+\n+\tret = dlb_get_group_sequence_number_occupancy(&dlb_dev->hw,\n+\t\t\t\t\t\t      args->group);\n+\n+\tresponse.id = ret;\n+\tresponse.status = 0;\n+\n+\t*(struct dlb_cmd_response *)args->response = response;\n+\n+\treturn ret;\n+}\n+\n+static void\n+dlb_pf_iface_fn_ptrs_init(void)\n+{\n+\tdlb_iface_low_level_io_init = dlb_pf_low_level_io_init;\n+\tdlb_iface_open = dlb_pf_open;\n+\tdlb_iface_domain_close = dlb_pf_domain_close;\n+\tdlb_iface_get_driver_version = NULL; /*dlb_pf_get_driver_version;*/\n+\tdlb_iface_get_device_version = dlb_pf_get_device_version;\n+\tdlb_iface_get_num_resources = dlb_pf_get_num_resources;\n+\tdlb_iface_sched_domain_create = 
dlb_pf_sched_domain_create;\n+\tdlb_iface_ldb_credit_pool_create = dlb_pf_ldb_credit_pool_create;\n+\tdlb_iface_dir_credit_pool_create = dlb_pf_dir_credit_pool_create;\n+\tdlb_iface_ldb_queue_create = dlb_pf_ldb_queue_create;\n+\tdlb_iface_dir_queue_create = dlb_pf_dir_queue_create;\n+\tdlb_iface_ldb_port_create = dlb_pf_ldb_port_create;\n+\tdlb_iface_dir_port_create = dlb_pf_dir_port_create;\n+\tdlb_iface_map_qid = dlb_pf_map_qid;\n+\tdlb_iface_unmap_qid = dlb_pf_unmap_qid;\n+\tdlb_iface_sched_domain_start = dlb_pf_sched_domain_start;\n+\tdlb_iface_pending_port_unmaps = dlb_pf_pending_port_unmaps;\n+\tdlb_iface_get_ldb_queue_depth = dlb_pf_get_ldb_queue_depth;\n+\tdlb_iface_get_dir_queue_depth = dlb_pf_get_dir_queue_depth;\n+\tdlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;\n+\tdlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;\n+\tdlb_iface_set_sn_allocation = dlb_pf_set_sn_allocation;\n+\tdlb_iface_get_sn_occupancy = dlb_pf_get_sn_occupancy;\n+}\n+\n+/* PCI DEV HOOKS */\n+static int\n+dlb_eventdev_pci_init(struct rte_eventdev *eventdev)\n+{\n+\tint ret = 0;\n+\tstruct rte_pci_device *pci_dev;\n+\tstruct dlb_devargs dlb_args = {\n+\t\t.socket_id = rte_socket_id(),\n+\t\t.max_num_events = DLB_MAX_NUM_LDB_CREDITS,\n+\t\t.num_dir_credits_override = -1,\n+\t\t.defer_sched = 0,\n+\t\t.num_atm_inflights = DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE,\n+\t};\n+\tstruct dlb_eventdev *dlb;\n+\n+\tDLB_LOG_DBG(\"Enter with dev_id=%d socket_id=%d\",\n+\t\t    eventdev->data->dev_id, eventdev->data->socket_id);\n+\n+\tdlb_entry_points_init(eventdev);\n+\n+\tdlb_pf_iface_fn_ptrs_init();\n+\n+\tpci_dev = RTE_DEV_TO_PCI(eventdev->dev);\n+\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY) {\n+\t\tdlb = dlb_pmd_priv(eventdev); /* rte_zmalloc_socket mem */\n+\n+\t\t/* Probe the DLB PF layer */\n+\t\tdlb->qm_instance.pf_dev = dlb_probe(pci_dev);\n+\n+\t\tif (dlb->qm_instance.pf_dev == NULL) {\n+\t\t\tDLB_LOG_ERR(\"DLB PF Probe failed with error %d\\n\",\n+\t\t\t\t    
rte_errno);\n+\t\t\tret = -rte_errno;\n+\t\t\tgoto dlb_probe_failed;\n+\t\t}\n+\n+\t\t/* Were we invoked with runtime parameters? */\n+\t\tif (pci_dev->device.devargs) {\n+\t\t\tret = dlb_parse_params(pci_dev->device.devargs->args,\n+\t\t\t\t\t       pci_dev->device.devargs->name,\n+\t\t\t\t\t       &dlb_args);\n+\t\t\tif (ret) {\n+\t\t\t\tDLB_LOG_ERR(\"PFPMD failed to parse args ret=%d, errno=%d\\n\",\n+\t\t\t\t\t    ret, rte_errno);\n+\t\t\t\tgoto dlb_probe_failed;\n+\t\t\t}\n+\t\t}\n+\n+\t\tret = dlb_primary_eventdev_probe(eventdev,\n+\t\t\t\t\t\t event_dlb_pf_name,\n+\t\t\t\t\t\t &dlb_args,\n+\t\t\t\t\t\t DLB_NOT_VDEV);\n+\t} else {\n+\t\tret = dlb_secondary_eventdev_probe(eventdev,\n+\t\t\t\t\t\t   event_dlb_pf_name,\n+\t\t\t\t\t\t   DLB_NOT_VDEV);\n+\t}\n+\tif (ret)\n+\t\tgoto dlb_probe_failed;\n+\n+\tDLB_LOG_INFO(\"DLB PF Probe success\\n\");\n+\n+\treturn 0;\n+\n+dlb_probe_failed:\n+\n+\tDLB_LOG_INFO(\"DLB PF Probe failed, ret=%d\\n\", ret);\n+\n+\treturn ret;\n+}\n+\n+#define EVENTDEV_INTEL_VENDOR_ID 0x8086\n+\n+static const struct rte_pci_id pci_id_dlb_map[] = {\n+\t{\n+\t\tRTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,\n+\t\t\t       DLB_PF_DEV_ID)\n+\t},\n+\t{\n+\t\t.vendor_id = 0,\n+\t},\n+};\n+\n+static int\n+event_dlb_pci_probe(struct rte_pci_driver *pci_drv,\n+\t\t    struct rte_pci_device *pci_dev)\n+{\n+\treturn rte_event_pmd_pci_probe_named(pci_drv, pci_dev,\n+\t\tsizeof(struct dlb_eventdev), dlb_eventdev_pci_init,\n+\t\tevent_dlb_pf_name);\n+}\n+\n+static int\n+event_dlb_pci_remove(struct rte_pci_device *pci_dev)\n+{\n+\treturn rte_event_pmd_pci_remove(pci_dev, NULL);\n+}\n+\n+static struct rte_pci_driver pci_eventdev_dlb_pmd = {\n+\t.id_table = pci_id_dlb_map,\n+\t.drv_flags = RTE_PCI_DRV_NEED_MAPPING,\n+\t.probe = event_dlb_pci_probe,\n+\t.remove = event_dlb_pci_remove,\n+};\n+\n+RTE_PMD_REGISTER_PCI(event_dlb_pf, pci_eventdev_dlb_pmd);\n+RTE_PMD_REGISTER_PCI_TABLE(event_dlb_pf, pci_id_dlb_map);\n",
    "prefixes": [
        "10/27"
    ]
}