get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
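
A minimal sketch of fetching this record programmatically, assuming network access to patches.dpdk.org and only the Python 3 standard library; the "?format=json" query parameter is assumed here to select the plain JSON renderer instead of the browsable page rendered below.

import json
import urllib.request

# Fetch the patch record shown below as JSON.
url = "https://patches.dpdk.org/api/patches/112829/?format=json"
with urllib.request.urlopen(url) as resp:
    patch = json.load(resp)

print(patch["name"])    # "[v8,2/5] raw/ifpga: add N3000 AFU driver"
print(patch["state"])   # "accepted"
print(patch["mbox"])    # URL of the raw mbox for this patch

The "mbox" URL in the response can be downloaded and applied locally with "git am".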

GET /api/patches/112829/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 112829,
    "url": "http://patches.dpdk.org/api/patches/112829/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1655348434-7096-3-git-send-email-wei.huang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1655348434-7096-3-git-send-email-wei.huang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1655348434-7096-3-git-send-email-wei.huang@intel.com",
    "date": "2022-06-16T03:00:31",
    "name": "[v8,2/5] raw/ifpga: add N3000 AFU driver",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "e5a51856d04b3a0da64df0fc4ca924f4ea035f96",
    "submitter": {
        "id": 2033,
        "url": "http://patches.dpdk.org/api/people/2033/?format=api",
        "name": "Wei Huang",
        "email": "wei.huang@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1655348434-7096-3-git-send-email-wei.huang@intel.com/mbox/",
    "series": [
        {
            "id": 23549,
            "url": "http://patches.dpdk.org/api/series/23549/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=23549",
            "date": "2022-06-16T03:00:29",
            "name": "introduce AFU PMD driver of FPGA",
            "version": 8,
            "mbox": "http://patches.dpdk.org/series/23549/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/112829/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/112829/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 6E0D2A0547;\n\tThu, 16 Jun 2022 04:53:02 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 5E83D42BCF;\n\tThu, 16 Jun 2022 04:52:54 +0200 (CEST)",
            "from mga11.intel.com (mga11.intel.com [192.55.52.93])\n by mails.dpdk.org (Postfix) with ESMTP id B845742BD4;\n Thu, 16 Jun 2022 04:52:51 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 15 Jun 2022 19:52:51 -0700",
            "from unknown (HELO zj-fpga-amt.sh.intel.com) ([10.238.175.102])\n by FMSMGA003.fm.intel.com with ESMTP; 15 Jun 2022 19:52:48 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1655347972; x=1686883972;\n h=from:to:cc:subject:date:message-id:in-reply-to: references;\n bh=IdpSNFEOZP6oWOZ5AWYOWdRQdjoNKa7AJ1oxEL5p0Lg=;\n b=RkPkxSAmnTjfP2cJQmC13gXu6CmRKOf24Jmk9wUhRbzfYe9ByobcYT2C\n fk7OGV4CwkSArLVV9EtflOPv/MVEfb5Hpnsjbn+L5+gHqibGQnX4I98cJ\n 7+pHZOzOwMFHLkBBUhu/vVwrEG3dNzlyHCdh5bzpKHaqZ07HfiInR8l5U\n WaNh4dVRFDtiiRMhfP7ww98Bi1JP2i9LQgZM2z6/4hg/V1CBsXakzVXiY\n piC3hrVAmj9Qf1fregQI1+mwq3088KzufAhPeIgsMMzqLTizGIKHb6CeF\n UJmylD+W9A89ayt2in47Rh5Nx723vRvEKB4ZGHVOMDY7RWUgG0xWhM6+n A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6400,9594,10379\"; a=\"276725734\"",
            "E=Sophos;i=\"5.91,302,1647327600\"; d=\"scan'208\";a=\"276725734\"",
            "E=Sophos;i=\"5.91,302,1647327600\"; d=\"scan'208\";a=\"674824691\""
        ],
        "X-ExtLoop1": "1",
        "From": "Wei Huang <wei.huang@intel.com>",
        "To": "dev@dpdk.org, thomas@monjalon.net, nipun.gupta@nxp.com,\n hemant.agrawal@nxp.com",
        "Cc": "stable@dpdk.org, rosen.xu@intel.com, tianfei.zhang@intel.com,\n qi.z.zhang@intel.com, Wei Huang <wei.huang@intel.com>",
        "Subject": "[PATCH v8 2/5] raw/ifpga: add N3000 AFU driver",
        "Date": "Wed, 15 Jun 2022 23:00:31 -0400",
        "Message-Id": "<1655348434-7096-3-git-send-email-wei.huang@intel.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1655348434-7096-1-git-send-email-wei.huang@intel.com>",
        "References": "<1654760242-7832-1-git-send-email-wei.huang@intel.com>\n <1655348434-7096-1-git-send-email-wei.huang@intel.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "N3000 AFU includes NLB0 and DMA modules, NLB0 is used to test PCI bus\nand DMA is used to test local memory.\nThis driver initialize the modules and report test result.\n\nSigned-off-by: Wei Huang <wei.huang@intel.com>\nAcked-by: Tianfei Zhang <tianfei.zhang@intel.com>\nReviewed-by: Rosen Xu <rosen.xu@intel.com>\n---\nv2: move source files to ifpga and rename, refine code\n---\nv3: fix Ubuntu 20.04 ARM build\n---\nv4: rename function name according to DPDK program guide\n---\n drivers/raw/ifpga/afu_pmd_core.h  |   19 +\n drivers/raw/ifpga/afu_pmd_n3000.c | 2019 +++++++++++++++++++++++++++++++++++++\n drivers/raw/ifpga/afu_pmd_n3000.h |  339 +++++++\n drivers/raw/ifpga/meson.build     |    3 +-\n drivers/raw/ifpga/rte_pmd_afu.h   |   97 ++\n 5 files changed, 2476 insertions(+), 1 deletion(-)\n create mode 100644 drivers/raw/ifpga/afu_pmd_n3000.c\n create mode 100644 drivers/raw/ifpga/afu_pmd_n3000.h\n create mode 100644 drivers/raw/ifpga/rte_pmd_afu.h",
    "diff": "diff --git a/drivers/raw/ifpga/afu_pmd_core.h b/drivers/raw/ifpga/afu_pmd_core.h\nindex 4fad2c7..a938172 100644\n--- a/drivers/raw/ifpga/afu_pmd_core.h\n+++ b/drivers/raw/ifpga/afu_pmd_core.h\n@@ -14,6 +14,7 @@\n #include <unistd.h>\n \n #include <rte_spinlock.h>\n+#include <rte_cycles.h>\n #include <rte_bus_ifpga.h>\n #include <rte_rawdev.h>\n \n@@ -60,6 +61,24 @@ struct afu_rawdev {\n \treturn rawdev ? (struct afu_rawdev *)rawdev->dev_private : NULL;\n }\n \n+#define CLS_TO_SIZE(n)  ((n) << 6)  /* get size of n cache lines */\n+#define SIZE_TO_CLS(s)  ((s) >> 6)  /* convert size to number of cache lines */\n+#define MHZ(f)  ((f) * 1000000)\n+\n+#define dsm_poll_timeout(addr, val, cond, invl, timeout) \\\n+({                                                       \\\n+\tuint64_t __wait = 0;                                 \\\n+\tuint64_t __invl = (invl);                            \\\n+\tuint64_t __timeout = (timeout);                      \\\n+\tfor (; __wait <= __timeout; __wait += __invl) {      \\\n+\t\t(val) = *(addr);                                 \\\n+\t\tif (cond)                                        \\\n+\t\t\tbreak;                                       \\\n+\t\trte_delay_ms(__invl);                            \\\n+\t}                                                    \\\n+\t(cond) ? 0 : 1;                                      \\\n+})\n+\n void afu_pmd_register(struct afu_rawdev_drv *driver);\n void afu_pmd_unregister(struct afu_rawdev_drv *driver);\n \ndiff --git a/drivers/raw/ifpga/afu_pmd_n3000.c b/drivers/raw/ifpga/afu_pmd_n3000.c\nnew file mode 100644\nindex 0000000..8708164\n--- /dev/null\n+++ b/drivers/raw/ifpga/afu_pmd_n3000.c\n@@ -0,0 +1,2019 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#include <errno.h>\n+#include <stdio.h>\n+#include <stdint.h>\n+#include <stdlib.h>\n+#include <inttypes.h>\n+#include <unistd.h>\n+#include <fcntl.h>\n+#include <poll.h>\n+#include <sys/eventfd.h>\n+#include <sys/ioctl.h>\n+\n+#include <rte_eal.h>\n+#include <rte_malloc.h>\n+#include <rte_memcpy.h>\n+#include <rte_io.h>\n+#include <rte_vfio.h>\n+#include <rte_bus_pci.h>\n+#include <rte_bus_ifpga.h>\n+#include <rte_rawdev.h>\n+\n+#include \"afu_pmd_core.h\"\n+#include \"afu_pmd_n3000.h\"\n+\n+static int nlb_afu_config(struct afu_rawdev *dev)\n+{\n+\tstruct n3000_afu_priv *priv = NULL;\n+\tstruct rte_pmd_afu_nlb_cfg *cfg = NULL;\n+\tstruct nlb_csr_cfg v;\n+\n+\tif (!dev)\n+\t\treturn -EINVAL;\n+\n+\tif (!dev->priv)\n+\t\treturn -ENOENT;\n+\n+\tpriv = (struct n3000_afu_priv *)dev->priv;\n+\tcfg = &priv->nlb_cfg;\n+\n+\tv.csr = 0;\n+\n+\tif (cfg->cont)\n+\t\tv.cont = 1;\n+\n+\tif (cfg->cache_policy == NLB_WRPUSH_I)\n+\t\tv.wrpush_i = 1;\n+\telse\n+\t\tv.wrthru_en = cfg->cache_policy;\n+\n+\tif (cfg->cache_hint == NLB_RDLINE_MIXED)\n+\t\tv.rdsel = 3;\n+\telse\n+\t\tv.rdsel = cfg->cache_hint;\n+\n+\tv.mode = cfg->mode;\n+\tv.chsel = cfg->read_vc;\n+\tv.wr_chsel = cfg->write_vc;\n+\tv.wrfence_chsel = cfg->wrfence_vc;\n+\tv.wrthru_en = cfg->cache_policy;\n+\tv.multicl_len = cfg->multi_cl - 1;\n+\n+\tIFPGA_RAWDEV_PMD_DEBUG(\"cfg: 0x%08x\", v.csr);\n+\trte_write32(v.csr, priv->nlb_ctx.addr + CSR_CFG);\n+\n+\treturn 0;\n+}\n+\n+static void nlb_afu_report(struct afu_rawdev *dev, uint32_t cl)\n+{\n+\tstruct n3000_afu_priv *priv = NULL;\n+\tstruct rte_pmd_afu_nlb_cfg *cfg = NULL;\n+\tstruct nlb_dsm_status *stat = NULL;\n+\tuint64_t ticks = 0;\n+\tdouble num, rd_bw, wr_bw;\n+\n+\tif (!dev || !dev->priv)\n+\t\treturn;\n+\n+\tpriv = 
(struct n3000_afu_priv *)dev->priv;\n+\n+\tcfg = &priv->nlb_cfg;\n+\tstat = priv->nlb_ctx.status_ptr;\n+\n+\tif (cfg->cont)\n+\t\tticks = stat->num_clocks - stat->start_overhead;\n+\telse\n+\t\tticks = stat->num_clocks -\n+\t\t\t(stat->start_overhead + stat->end_overhead);\n+\n+\tif (cfg->freq_mhz == 0)\n+\t\tcfg->freq_mhz = 200;\n+\n+\tnum = (double)stat->num_reads;\n+\trd_bw = (num * CLS_TO_SIZE(1) * MHZ(cfg->freq_mhz)) / ticks;\n+\tnum = (double)stat->num_writes;\n+\twr_bw = (num * CLS_TO_SIZE(1) * MHZ(cfg->freq_mhz)) / ticks;\n+\n+\tprintf(\"Cachelines  Read_Count Write_Count Clocks@%uMHz   \"\n+\t\t\"Rd_Bandwidth   Wr_Bandwidth\\n\", cfg->freq_mhz);\n+\tprintf(\"%10u  %10u %11u  %12\"PRIu64\"   %7.3f GB/s   %7.3f GB/s\\n\",\n+\t\tcl, stat->num_reads, stat->num_writes, ticks,\n+\t\trd_bw / 1e9, wr_bw / 1e9);\n+}\n+\n+static int nlb_afu_test(struct afu_rawdev *dev)\n+{\n+\tstruct n3000_afu_priv *priv = NULL;\n+\tstruct nlb_afu_ctx *ctx = NULL;\n+\tstruct rte_pmd_afu_nlb_cfg *cfg = NULL;\n+\tstruct nlb_csr_ctl ctl;\n+\tuint32_t *ptr = NULL;\n+\tuint32_t i, j, cl, val = 0;\n+\tuint64_t sval = 0;\n+\tint ret = 0;\n+\n+\tif (!dev)\n+\t\treturn -EINVAL;\n+\n+\tif (!dev->priv)\n+\t\treturn -ENOENT;\n+\n+\tpriv = (struct n3000_afu_priv *)dev->priv;\n+\tctx = &priv->nlb_ctx;\n+\tcfg = &priv->nlb_cfg;\n+\n+\t/* initialize registers */\n+\tIFPGA_RAWDEV_PMD_DEBUG(\"dsm_addr: 0x%\"PRIx64, ctx->dsm_iova);\n+\trte_write64(ctx->dsm_iova, ctx->addr + CSR_AFU_DSM_BASEL);\n+\n+\tctl.csr = 0;\n+\trte_write32(ctl.csr, ctx->addr + CSR_CTL);\n+\tctl.reset = 1;\n+\trte_write32(ctl.csr, ctx->addr + CSR_CTL);\n+\n+\tIFPGA_RAWDEV_PMD_DEBUG(\"src_addr: 0x%\"PRIx64, ctx->src_iova);\n+\trte_write64(SIZE_TO_CLS(ctx->src_iova), ctx->addr + CSR_SRC_ADDR);\n+\tIFPGA_RAWDEV_PMD_DEBUG(\"dst_addr: 0x%\"PRIx64, ctx->dest_iova);\n+\trte_write64(SIZE_TO_CLS(ctx->dest_iova), ctx->addr + CSR_DST_ADDR);\n+\n+\tret = nlb_afu_config(dev);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* initialize src data */\n+\tptr = (uint32_t *)ctx->src_ptr;\n+\tj = CLS_TO_SIZE(cfg->end) >> 2;\n+\tfor (i = 0; i < j; i++)\n+\t\t*ptr++ = i;\n+\n+\t/* start test */\n+\tfor (cl = cfg->begin; cl <= cfg->end; cl += cfg->multi_cl) {\n+\t\tmemset(ctx->dest_ptr, 0, CLS_TO_SIZE(cl));\n+\t\tmemset(ctx->dsm_ptr, 0, DSM_SIZE);\n+\n+\t\tctl.csr = 0;\n+\t\trte_write32(ctl.csr, ctx->addr + CSR_CTL);\n+\t\tctl.reset = 1;\n+\t\trte_write32(ctl.csr, ctx->addr + CSR_CTL);\n+\n+\t\trte_write32(cl, ctx->addr + CSR_NUM_LINES);\n+\n+\t\trte_delay_us(10);\n+\n+\t\tctl.start = 1;\n+\t\trte_write32(ctl.csr, ctx->addr + CSR_CTL);\n+\n+\t\tif (cfg->cont) {\n+\t\t\trte_delay_ms(cfg->timeout * 1000);\n+\t\t\tctl.force_completion = 1;\n+\t\t\trte_write32(ctl.csr, ctx->addr + CSR_CTL);\n+\t\t\tret = dsm_poll_timeout(&ctx->status_ptr->test_complete,\n+\t\t\t\tval, (val & 0x1) == 1, DSM_POLL_INTERVAL,\n+\t\t\t\tDSM_TIMEOUT);\n+\t\t\tif (ret) {\n+\t\t\t\tprintf(\"DSM poll timeout\\n\");\n+\t\t\t\tgoto end;\n+\t\t\t}\n+\t\t} else {\n+\t\t\tret = dsm_poll_timeout(&ctx->status_ptr->test_complete,\n+\t\t\t\tval, (val & 0x1) == 1, DSM_POLL_INTERVAL,\n+\t\t\t\tDSM_TIMEOUT);\n+\t\t\tif (ret) {\n+\t\t\t\tprintf(\"DSM poll timeout\\n\");\n+\t\t\t\tgoto end;\n+\t\t\t}\n+\t\t\tctl.force_completion = 1;\n+\t\t\trte_write32(ctl.csr, ctx->addr + CSR_CTL);\n+\t\t}\n+\n+\t\tnlb_afu_report(dev, cl);\n+\n+\t\ti = 0;\n+\t\twhile (i++ < 100) {\n+\t\t\tsval = rte_read64(ctx->addr + CSR_STATUS1);\n+\t\t\tif (sval == 0)\n+\t\t\t\tbreak;\n+\t\t\trte_delay_us(1000);\n+\t\t}\n+\n+\t\tptr = (uint32_t 
*)ctx->dest_ptr;\n+\t\tj = CLS_TO_SIZE(cl) >> 2;\n+\t\tfor (i = 0; i < j; i++) {\n+\t\t\tif (*ptr++ != i) {\n+\t\t\t\tIFPGA_RAWDEV_PMD_ERR(\"Data mismatch @ %u\", i);\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+end:\n+\treturn ret;\n+}\n+\n+static void dma_afu_buf_free(struct dma_afu_ctx *ctx)\n+{\n+\tint i = 0;\n+\n+\tif (!ctx)\n+\t\treturn;\n+\n+\tfor (i = 0; i < NUM_DMA_BUF; i++) {\n+\t\trte_free(ctx->dma_buf[i]);\n+\t\tctx->dma_buf[i] = NULL;\n+\t}\n+\n+\trte_free(ctx->data_buf);\n+\tctx->data_buf = NULL;\n+\n+\trte_free(ctx->ref_buf);\n+\tctx->ref_buf = NULL;\n+}\n+\n+static int dma_afu_buf_alloc(struct dma_afu_ctx *ctx,\n+\tstruct rte_pmd_afu_dma_cfg *cfg)\n+{\n+\tsize_t page_sz = sysconf(_SC_PAGE_SIZE);\n+\tint i, ret = 0;\n+\n+\tif (!ctx || !cfg)\n+\t\treturn -EINVAL;\n+\n+\tfor (i = 0; i < NUM_DMA_BUF; i++) {\n+\t\tctx->dma_buf[i] = (uint64_t *)rte_zmalloc(NULL, cfg->size,\n+\t\t\tTEST_MEM_ALIGN);\n+\t\tif (!ctx->dma_buf[i]) {\n+\t\t\tret = -ENOMEM;\n+\t\t\tgoto free_dma_buf;\n+\t\t}\n+\t\tctx->dma_iova[i] = rte_malloc_virt2iova(ctx->dma_buf[i]);\n+\t\tif (ctx->dma_iova[i] == RTE_BAD_IOVA) {\n+\t\t\tret = -ENOMEM;\n+\t\t\tgoto free_dma_buf;\n+\t\t}\n+\t}\n+\n+\tctx->data_buf = rte_malloc(NULL, cfg->length, page_sz);\n+\tif (!ctx->data_buf) {\n+\t\tret = -ENOMEM;\n+\t\tgoto free_dma_buf;\n+\t}\n+\n+\tctx->ref_buf = rte_malloc(NULL, cfg->length, page_sz);\n+\tif (!ctx->ref_buf) {\n+\t\tret = -ENOMEM;\n+\t\tgoto free_data_buf;\n+\t}\n+\n+\treturn 0;\n+\n+free_data_buf:\n+\trte_free(ctx->data_buf);\n+\tctx->data_buf = NULL;\n+free_dma_buf:\n+\tfor (i = 0; i < NUM_DMA_BUF; i++) {\n+\t\trte_free(ctx->dma_buf[i]);\n+\t\tctx->dma_buf[i] = NULL;\n+\t}\n+\treturn ret;\n+}\n+\n+static void dma_afu_buf_init(struct dma_afu_ctx *ctx, size_t size)\n+{\n+\tint *ptr = NULL;\n+\tsize_t i = 0;\n+\tsize_t dword_size = 0;\n+\n+\tif (!ctx || !size)\n+\t\treturn;\n+\n+\tptr = (int *)ctx->ref_buf;\n+\n+\tif (ctx->pattern) {\n+\t\tmemset(ptr, ctx->pattern, size);\n+\t} else {\n+\t\tsrand(99);\n+\t\tdword_size = size >> 2;\n+\t\tfor (i = 0; i < dword_size; i++)\n+\t\t\t*ptr++ = rand();\n+\t}\n+\trte_memcpy(ctx->data_buf, ctx->ref_buf, size);\n+}\n+\n+static int dma_afu_buf_verify(struct dma_afu_ctx *ctx, size_t size)\n+{\n+\tuint8_t *src = NULL;\n+\tuint8_t *dst = NULL;\n+\tsize_t i = 0;\n+\tint n = 0;\n+\n+\tif (!ctx || !size)\n+\t\treturn -EINVAL;\n+\n+\tsrc = (uint8_t *)ctx->ref_buf;\n+\tdst = (uint8_t *)ctx->data_buf;\n+\n+\tif (memcmp(src, dst, size)) {\n+\t\tprintf(\"Transfer is corrupted\\n\");\n+\t\tif (ctx->verbose) {\n+\t\t\tfor (i = 0; i < size; i++) {\n+\t\t\t\tif (*src != *dst) {\n+\t\t\t\t\tif (++n >= ERR_CHECK_LIMIT)\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tprintf(\"Mismatch at 0x%zx, \"\n+\t\t\t\t\t\t\"Expected %02x  Actual %02x\\n\",\n+\t\t\t\t\t\ti, *src, *dst);\n+\t\t\t\t}\n+\t\t\t\tsrc++;\n+\t\t\t\tdst++;\n+\t\t\t}\n+\t\t\tif (n < ERR_CHECK_LIMIT) {\n+\t\t\t\tprintf(\"Found %d error bytes\\n\", n);\n+\t\t\t} else {\n+\t\t\t\tprintf(\"......\\n\");\n+\t\t\t\tprintf(\"Found more than %d error bytes\\n\", n);\n+\t\t\t}\n+\t\t}\n+\t\treturn -1;\n+\t}\n+\n+\tprintf(\"Transfer is verified\\n\");\n+\treturn 0;\n+}\n+\n+static void blk_write64(uint64_t *dev_addr, uint64_t *host_addr, uint64_t bytes)\n+{\n+\tuint64_t qwords = bytes / sizeof(uint64_t);\n+\n+\tif (!IS_ALIGNED_QWORD((uint64_t)dev_addr) ||\n+\t\t!IS_ALIGNED_QWORD((uint64_t)bytes))\n+\t\treturn;\n+\n+\tfor (; qwords > 0; qwords--, host_addr++, dev_addr++)\n+\t\trte_write64(*host_addr, dev_addr);\n+}\n+\n+static void blk_read64(uint64_t 
*dev_addr, uint64_t *host_addr, uint64_t bytes)\n+{\n+\tuint64_t qwords = bytes / sizeof(uint64_t);\n+\n+\tif (!IS_ALIGNED_QWORD((uint64_t)dev_addr) ||\n+\t\t!IS_ALIGNED_QWORD((uint64_t)bytes))\n+\t\treturn;\n+\n+\tfor (; qwords > 0; qwords--, host_addr++, dev_addr++)\n+\t\t*host_addr = rte_read64(dev_addr);\n+}\n+\n+static void switch_ase_page(struct dma_afu_ctx *ctx, uint64_t addr)\n+{\n+\tuint64_t requested_page = addr & ~DMA_ASE_WINDOW_MASK;\n+\n+\tif (!ctx)\n+\t\treturn;\n+\n+\tif (requested_page != ctx->cur_ase_page) {\n+\t\trte_write64(requested_page, ctx->ase_ctrl_addr);\n+\t\tctx->cur_ase_page = requested_page;\n+\t}\n+}\n+\n+static int ase_write_unaligned(struct dma_afu_ctx *ctx, uint64_t dev_addr,\n+\tuint64_t host_addr, uint32_t count)\n+{\n+\tuint64_t dev_aligned_addr = 0;\n+\tuint64_t shift = 0;\n+\tuint64_t val = 0;\n+\tuintptr_t addr = (uintptr_t)host_addr;  /* transfer to pointer size */\n+\n+\tIFPGA_RAWDEV_PMD_DEBUG(\"0x%\"PRIx64\" --> 0x%\"PRIx64\" (0x%x)\", host_addr,\n+\t\tdev_addr, count);\n+\n+\tif (!ctx || (count >= QWORD_BYTES))\n+\t\treturn -EINVAL;\n+\n+\tif (!count)\n+\t\treturn 0;\n+\n+\tswitch_ase_page(ctx, dev_addr);\n+\n+\tshift = dev_addr % QWORD_BYTES;\n+\tdev_aligned_addr = (dev_addr - shift) & DMA_ASE_WINDOW_MASK;\n+\tval = rte_read64(ctx->ase_data_addr + dev_aligned_addr);\n+\trte_memcpy(((char *)(&val)) + shift, (void *)addr, count);\n+\n+\t/* write back to device */\n+\trte_write64(val, ctx->ase_data_addr + dev_aligned_addr);\n+\n+\treturn 0;\n+}\n+\n+static int ase_write(struct dma_afu_ctx *ctx, uint64_t *dst_ptr,\n+\tuint64_t *src_ptr, uint64_t *count)\n+{\n+\tuint64_t src = *src_ptr;\n+\tuint64_t dst = *dst_ptr;\n+\tuint64_t align_bytes = *count;\n+\tuint64_t offset = 0;\n+\tuint64_t left_in_page = DMA_ASE_WINDOW;\n+\tuint64_t size_to_copy = 0;\n+\n+\tIFPGA_RAWDEV_PMD_DEBUG(\"0x%\"PRIx64\" --> 0x%\"PRIx64\" (0x%\"PRIx64\")\", src, dst,\n+\t\talign_bytes);\n+\n+\tif (!ctx || !IS_ALIGNED_DWORD(dst))\n+\t\treturn -EINVAL;\n+\n+\tif (align_bytes < DWORD_BYTES)\n+\t\treturn 0;\n+\n+\tif (!IS_ALIGNED_QWORD(dst)) {\n+\t\t/* Write out a single DWORD to get QWORD aligned */\n+\t\tswitch_ase_page(ctx, dst);\n+\t\toffset = dst & DMA_ASE_WINDOW_MASK;\n+\n+\t\trte_write32(*(uint32_t *)(uintptr_t)src,\n+\t\t\tctx->ase_data_addr + offset);\n+\t\tsrc += DWORD_BYTES;\n+\t\tdst += DWORD_BYTES;\n+\t\talign_bytes -= DWORD_BYTES;\n+\t}\n+\n+\tif (!align_bytes)\n+\t\treturn 0;\n+\n+\t/* Write out blocks of 64-bit values */\n+\twhile (align_bytes >= QWORD_BYTES) {\n+\t\tleft_in_page -= dst & DMA_ASE_WINDOW_MASK;\n+\t\tsize_to_copy =\n+\t\t\tMIN(left_in_page, (align_bytes & ~(QWORD_BYTES - 1)));\n+\t\tif (size_to_copy < QWORD_BYTES)\n+\t\t\tbreak;\n+\t\tswitch_ase_page(ctx, dst);\n+\t\toffset = dst & DMA_ASE_WINDOW_MASK;\n+\t\tblk_write64((uint64_t *)(ctx->ase_data_addr + offset),\n+\t\t\t(uint64_t *)(uintptr_t)src, size_to_copy);\n+\t\tsrc += size_to_copy;\n+\t\tdst += size_to_copy;\n+\t\talign_bytes -= size_to_copy;\n+\t}\n+\n+\tif (align_bytes >= DWORD_BYTES) {\n+\t\t/* Write out remaining DWORD */\n+\t\tswitch_ase_page(ctx, dst);\n+\t\toffset = dst & DMA_ASE_WINDOW_MASK;\n+\t\trte_write32(*(uint32_t *)(uintptr_t)src,\n+\t\t\tctx->ase_data_addr + offset);\n+\t\tsrc += DWORD_BYTES;\n+\t\tdst += DWORD_BYTES;\n+\t\talign_bytes -= DWORD_BYTES;\n+\t}\n+\n+\t*src_ptr = src;\n+\t*dst_ptr = dst;\n+\t*count = align_bytes;\n+\n+\treturn 0;\n+}\n+\n+static int ase_host_to_fpga(struct dma_afu_ctx *ctx, uint64_t *dst_ptr,\n+\tuint64_t *src_ptr, uint64_t count)\n+{\n+\tuint64_t dst 
= *dst_ptr;\n+\tuint64_t src = *src_ptr;\n+\tuint64_t count_left = count;\n+\tuint64_t unaligned_size = 0;\n+\tint ret = 0;\n+\n+\tIFPGA_RAWDEV_PMD_DEBUG(\"0x%\"PRIx64\" --> 0x%\"PRIx64\" (0x%\"PRIx64\")\", src, dst,\n+\t\tcount);\n+\n+\t/* aligns address to 8 byte using dst masking method */\n+\tif (!IS_ALIGNED_DWORD(dst) && !IS_ALIGNED_QWORD(dst)) {\n+\t\tunaligned_size = QWORD_BYTES - (dst % QWORD_BYTES);\n+\t\tif (unaligned_size > count_left)\n+\t\t\tunaligned_size = count_left;\n+\t\tret = ase_write_unaligned(ctx, dst, src, unaligned_size);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\tcount_left -= unaligned_size;\n+\t\tsrc += unaligned_size;\n+\t\tdst += unaligned_size;\n+\t}\n+\n+\t/* Handles 8/4 byte MMIO transfer */\n+\tret = ase_write(ctx, &dst, &src, &count_left);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Left over unaligned bytes transferred using dst masking method */\n+\tunaligned_size = QWORD_BYTES - (dst % QWORD_BYTES);\n+\tif (unaligned_size > count_left)\n+\t\tunaligned_size = count_left;\n+\n+\tret = ase_write_unaligned(ctx, dst, src, unaligned_size);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tcount_left -= unaligned_size;\n+\t*dst_ptr = dst + unaligned_size;\n+\t*src_ptr = src + unaligned_size;\n+\n+\treturn 0;\n+}\n+\n+static int ase_read_unaligned(struct dma_afu_ctx *ctx, uint64_t dev_addr,\n+\tuint64_t host_addr, uint32_t count)\n+{\n+\tuint64_t dev_aligned_addr = 0;\n+\tuint64_t shift = 0;\n+\tuint64_t val = 0;\n+\tuintptr_t addr = (uintptr_t)host_addr;  /* transfer to pointer size */\n+\n+\tIFPGA_RAWDEV_PMD_DEBUG(\"0x%\"PRIx64\" <-- 0x%\"PRIx64\" (0x%x)\", host_addr,\n+\t\tdev_addr, count);\n+\n+\tif (!ctx || (count >= QWORD_BYTES))\n+\t\treturn -EINVAL;\n+\n+\tif (!count)\n+\t\treturn 0;\n+\n+\tswitch_ase_page(ctx, dev_addr);\n+\n+\tshift = dev_addr % QWORD_BYTES;\n+\tdev_aligned_addr = (dev_addr - shift) & DMA_ASE_WINDOW_MASK;\n+\tval = rte_read64(ctx->ase_data_addr + dev_aligned_addr);\n+\trte_memcpy((void *)addr, ((char *)(&val)) + shift, count);\n+\n+\treturn 0;\n+}\n+\n+static int ase_read(struct dma_afu_ctx *ctx, uint64_t *src_ptr,\n+\tuint64_t *dst_ptr, uint64_t *count)\n+{\n+\tuint64_t src = *src_ptr;\n+\tuint64_t dst = *dst_ptr;\n+\tuint64_t align_bytes = *count;\n+\tuint64_t offset = 0;\n+\tuint64_t left_in_page = DMA_ASE_WINDOW;\n+\tuint64_t size_to_copy = 0;\n+\n+\tIFPGA_RAWDEV_PMD_DEBUG(\"0x%\"PRIx64\" <-- 0x%\"PRIx64\" (0x%\"PRIx64\")\", dst, src,\n+\t\talign_bytes);\n+\n+\tif (!ctx || !IS_ALIGNED_DWORD(src))\n+\t\treturn -EINVAL;\n+\n+\tif (align_bytes < DWORD_BYTES)\n+\t\treturn 0;\n+\n+\tif (!IS_ALIGNED_QWORD(src)) {\n+\t\t/* Read a single DWORD to get QWORD aligned */\n+\t\tswitch_ase_page(ctx, src);\n+\t\toffset = src & DMA_ASE_WINDOW_MASK;\n+\t\t*(uint32_t *)(uintptr_t)dst =\n+\t\t\trte_read32(ctx->ase_data_addr + offset);\n+\t\tsrc += DWORD_BYTES;\n+\t\tdst += DWORD_BYTES;\n+\t\talign_bytes -= DWORD_BYTES;\n+\t}\n+\n+\tif (!align_bytes)\n+\t\treturn 0;\n+\n+\t/* Read blocks of 64-bit values */\n+\twhile (align_bytes >= QWORD_BYTES) {\n+\t\tleft_in_page -= src & DMA_ASE_WINDOW_MASK;\n+\t\tsize_to_copy =\n+\t\t\tMIN(left_in_page, (align_bytes & ~(QWORD_BYTES - 1)));\n+\t\tif (size_to_copy < QWORD_BYTES)\n+\t\t\tbreak;\n+\t\tswitch_ase_page(ctx, src);\n+\t\toffset = src & DMA_ASE_WINDOW_MASK;\n+\t\tblk_read64((uint64_t *)(ctx->ase_data_addr + offset),\n+\t\t\t(uint64_t *)(uintptr_t)dst, size_to_copy);\n+\t\tsrc += size_to_copy;\n+\t\tdst += size_to_copy;\n+\t\talign_bytes -= size_to_copy;\n+\t}\n+\n+\tif (align_bytes >= DWORD_BYTES) {\n+\t\t/* Read 
remaining DWORD */\n+\t\tswitch_ase_page(ctx, src);\n+\t\toffset = src & DMA_ASE_WINDOW_MASK;\n+\t\t*(uint32_t *)(uintptr_t)dst =\n+\t\t\trte_read32(ctx->ase_data_addr + offset);\n+\t\tsrc += DWORD_BYTES;\n+\t\tdst += DWORD_BYTES;\n+\t\talign_bytes -= DWORD_BYTES;\n+\t}\n+\n+\t*src_ptr = src;\n+\t*dst_ptr = dst;\n+\t*count = align_bytes;\n+\n+\treturn 0;\n+}\n+\n+static int ase_fpga_to_host(struct dma_afu_ctx *ctx, uint64_t *src_ptr,\n+\tuint64_t *dst_ptr, uint64_t count)\n+{\n+\tuint64_t src = *src_ptr;\n+\tuint64_t dst = *dst_ptr;\n+\tuint64_t count_left = count;\n+\tuint64_t unaligned_size = 0;\n+\tint ret = 0;\n+\n+\tIFPGA_RAWDEV_PMD_DEBUG(\"0x%\"PRIx64\" --> 0x%\"PRIx64\" (0x%\"PRIx64\")\", src, dst,\n+\t\tcount);\n+\n+\t/* Aligns address to 8 byte using src masking method */\n+\tif (!IS_ALIGNED_DWORD(src) && !IS_ALIGNED_QWORD(src)) {\n+\t\tunaligned_size = QWORD_BYTES - (src % QWORD_BYTES);\n+\t\tif (unaligned_size > count_left)\n+\t\t\tunaligned_size = count_left;\n+\t\tret = ase_read_unaligned(ctx, src, dst, unaligned_size);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\tcount_left -= unaligned_size;\n+\t\tdst += unaligned_size;\n+\t\tsrc += unaligned_size;\n+\t}\n+\n+\t/* Handles 8/4 byte MMIO transfer */\n+\tret = ase_read(ctx, &src, &dst, &count_left);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Left over unaligned bytes transferred using src masking method */\n+\tunaligned_size = QWORD_BYTES - (src % QWORD_BYTES);\n+\tif (unaligned_size > count_left)\n+\t\tunaligned_size = count_left;\n+\n+\tret = ase_read_unaligned(ctx, src, dst, unaligned_size);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tcount_left -= unaligned_size;\n+\t*dst_ptr = dst + unaligned_size;\n+\t*src_ptr = src + unaligned_size;\n+\n+\treturn 0;\n+}\n+\n+static void clear_interrupt(struct dma_afu_ctx *ctx)\n+{\n+\t/* clear interrupt by writing 1 to IRQ bit in status register */\n+\tmsgdma_status status;\n+\n+\tif (!ctx)\n+\t\treturn;\n+\n+\tstatus.csr = 0;\n+\tstatus.irq = 1;\n+\trte_write32(status.csr, CSR_STATUS(ctx->csr_addr));\n+}\n+\n+static int poll_interrupt(struct dma_afu_ctx *ctx)\n+{\n+\tstruct pollfd pfd = {0};\n+\tuint64_t count = 0;\n+\tssize_t bytes_read = 0;\n+\tint poll_ret = 0;\n+\tint ret = 0;\n+\n+\tif (!ctx || (ctx->event_fd < 0))\n+\t\treturn -EINVAL;\n+\n+\tpfd.fd = ctx->event_fd;\n+\tpfd.events = POLLIN;\n+\tpoll_ret = poll(&pfd, 1, DMA_TIMEOUT_MSEC);\n+\tif (poll_ret < 0) {\n+\t\tIFPGA_RAWDEV_PMD_ERR(\"Error %s\", strerror(errno));\n+\t\tret = -EFAULT;\n+\t\tgoto out;\n+\t} else if (poll_ret == 0) {\n+\t\tIFPGA_RAWDEV_PMD_ERR(\"Timeout\");\n+\t\tret = -ETIMEDOUT;\n+\t} else {\n+\t\tbytes_read = read(pfd.fd, &count, sizeof(count));\n+\t\tif (bytes_read > 0) {\n+\t\t\tif (ctx->verbose)\n+\t\t\t\tIFPGA_RAWDEV_PMD_DEBUG(\"Successful, ret %d, cnt %\"PRIu64,\n+\t\t\t\t\tpoll_ret, count);\n+\t\t\tret = 0;\n+\t\t} else {\n+\t\t\tIFPGA_RAWDEV_PMD_ERR(\"Failed %s\", bytes_read > 0 ?\n+\t\t\t\tstrerror(errno) : \"zero bytes read\");\n+\t\t\tret = -EIO;\n+\t\t}\n+\t}\n+out:\n+\tclear_interrupt(ctx);\n+\treturn ret;\n+}\n+\n+static void send_descriptor(struct dma_afu_ctx *ctx, msgdma_ext_desc *desc)\n+{\n+\tmsgdma_status status;\n+\tuint64_t fpga_queue_full = 0;\n+\n+\tif (!ctx)\n+\t\treturn;\n+\n+\tif (ctx->verbose) {\n+\t\tIFPGA_RAWDEV_PMD_DEBUG(\"descriptor.rd_address = 0x%x%08x\",\n+\t\t\tdesc->rd_address_ext, desc->rd_address);\n+\t\tIFPGA_RAWDEV_PMD_DEBUG(\"descriptor.wr_address = 0x%x%08x\",\n+\t\t\tdesc->wr_address_ext, desc->wr_address);\n+\t\tIFPGA_RAWDEV_PMD_DEBUG(\"descriptor.len = %u\", 
desc->len);\n+\t\tIFPGA_RAWDEV_PMD_DEBUG(\"descriptor.wr_burst_count = %u\",\n+\t\t\tdesc->wr_burst_count);\n+\t\tIFPGA_RAWDEV_PMD_DEBUG(\"descriptor.rd_burst_count = %u\",\n+\t\t\tdesc->rd_burst_count);\n+\t\tIFPGA_RAWDEV_PMD_DEBUG(\"descriptor.wr_stride %u\", desc->wr_stride);\n+\t\tIFPGA_RAWDEV_PMD_DEBUG(\"descriptor.rd_stride %u\", desc->rd_stride);\n+\t}\n+\n+\tdo {\n+\t\tstatus.csr = rte_read32(CSR_STATUS(ctx->csr_addr));\n+\t\tif (fpga_queue_full++ > 100000000) {\n+\t\t\tIFPGA_RAWDEV_PMD_DEBUG(\"DMA queue full retry\");\n+\t\t\tfpga_queue_full = 0;\n+\t\t}\n+\t} while (status.desc_buf_full);\n+\n+\tblk_write64((uint64_t *)ctx->desc_addr, (uint64_t *)desc,\n+\t\tsizeof(*desc));\n+}\n+\n+static int do_dma(struct dma_afu_ctx *ctx, uint64_t dst, uint64_t src,\n+\tint count, int is_last_desc, fpga_dma_type type, int intr_en)\n+{\n+\tmsgdma_ext_desc *desc = NULL;\n+\tint alignment_offset = 0;\n+\tint segment_size = 0;\n+\n+\tif (!ctx)\n+\t\treturn -EINVAL;\n+\n+\t/* src, dst and count must be 64-byte aligned */\n+\tif (!IS_DMA_ALIGNED(src) || !IS_DMA_ALIGNED(dst) ||\n+\t\t!IS_DMA_ALIGNED(count))\n+\t\treturn -EINVAL;\n+\tmemset(ctx->desc_buf, 0, sizeof(msgdma_ext_desc));\n+\n+\t/* these fields are fixed for all DMA transfers */\n+\tdesc = ctx->desc_buf;\n+\tdesc->seq_num = 0;\n+\tdesc->wr_stride = 1;\n+\tdesc->rd_stride = 1;\n+\tdesc->control.go = 1;\n+\tif (intr_en)\n+\t\tdesc->control.transfer_irq_en = 1;\n+\telse\n+\t\tdesc->control.transfer_irq_en = 0;\n+\n+\tif (!is_last_desc)\n+\t\tdesc->control.early_done_en = 1;\n+\telse\n+\t\tdesc->control.early_done_en = 0;\n+\n+\tif (type == FPGA_TO_FPGA) {\n+\t\tdesc->rd_address = src & DMA_MASK_32_BIT;\n+\t\tdesc->wr_address = dst & DMA_MASK_32_BIT;\n+\t\tdesc->len = count;\n+\t\tdesc->wr_burst_count = 4;\n+\t\tdesc->rd_burst_count = 4;\n+\t\tdesc->rd_address_ext = (src >> 32) & DMA_MASK_32_BIT;\n+\t\tdesc->wr_address_ext = (dst >> 32) & DMA_MASK_32_BIT;\n+\t\tsend_descriptor(ctx, desc);\n+\t} else {\n+\t\t/* check CCIP (host) address is aligned to 4CL (256B) */\n+\t\talignment_offset = (type == HOST_TO_FPGA)\n+\t\t\t? 
(src % CCIP_ALIGN_BYTES) : (dst % CCIP_ALIGN_BYTES);\n+\t\t/* performing a short transfer to get aligned */\n+\t\tif (alignment_offset != 0) {\n+\t\t\tdesc->rd_address = src & DMA_MASK_32_BIT;\n+\t\t\tdesc->wr_address = dst & DMA_MASK_32_BIT;\n+\t\t\tdesc->wr_burst_count = 1;\n+\t\t\tdesc->rd_burst_count = 1;\n+\t\t\tdesc->rd_address_ext = (src >> 32) & DMA_MASK_32_BIT;\n+\t\t\tdesc->wr_address_ext = (dst >> 32) & DMA_MASK_32_BIT;\n+\t\t\t/* count isn't large enough to hit next 4CL boundary */\n+\t\t\tif ((CCIP_ALIGN_BYTES - alignment_offset) >= count) {\n+\t\t\t\tsegment_size = count;\n+\t\t\t\tcount = 0;\n+\t\t\t} else {\n+\t\t\t\tsegment_size = CCIP_ALIGN_BYTES\n+\t\t\t\t\t- alignment_offset;\n+\t\t\t\tsrc += segment_size;\n+\t\t\t\tdst += segment_size;\n+\t\t\t\tcount -= segment_size;\n+\t\t\t\tdesc->control.transfer_irq_en = 0;\n+\t\t\t}\n+\t\t\t/* post short transfer to align to a 4CL (256 byte) */\n+\t\t\tdesc->len = segment_size;\n+\t\t\tsend_descriptor(ctx, desc);\n+\t\t}\n+\t\t/* at this point we are 4CL (256 byte) aligned */\n+\t\tif (count >= CCIP_ALIGN_BYTES) {\n+\t\t\tdesc->rd_address = src & DMA_MASK_32_BIT;\n+\t\t\tdesc->wr_address = dst & DMA_MASK_32_BIT;\n+\t\t\tdesc->wr_burst_count = 4;\n+\t\t\tdesc->rd_burst_count = 4;\n+\t\t\tdesc->rd_address_ext = (src >> 32) & DMA_MASK_32_BIT;\n+\t\t\tdesc->wr_address_ext = (dst >> 32) & DMA_MASK_32_BIT;\n+\t\t\t/* buffer ends on 4CL boundary */\n+\t\t\tif ((count % CCIP_ALIGN_BYTES) == 0) {\n+\t\t\t\tsegment_size = count;\n+\t\t\t\tcount = 0;\n+\t\t\t} else {\n+\t\t\t\tsegment_size = count\n+\t\t\t\t\t- (count % CCIP_ALIGN_BYTES);\n+\t\t\t\tsrc += segment_size;\n+\t\t\t\tdst += segment_size;\n+\t\t\t\tcount -= segment_size;\n+\t\t\t\tdesc->control.transfer_irq_en = 0;\n+\t\t\t}\n+\t\t\tdesc->len = segment_size;\n+\t\t\tsend_descriptor(ctx, desc);\n+\t\t}\n+\t\t/* post short transfer to handle the remainder */\n+\t\tif (count > 0) {\n+\t\t\tdesc->rd_address = src & DMA_MASK_32_BIT;\n+\t\t\tdesc->wr_address = dst & DMA_MASK_32_BIT;\n+\t\t\tdesc->len = count;\n+\t\t\tdesc->wr_burst_count = 1;\n+\t\t\tdesc->rd_burst_count = 1;\n+\t\t\tdesc->rd_address_ext = (src >> 32) & DMA_MASK_32_BIT;\n+\t\t\tdesc->wr_address_ext = (dst >> 32) & DMA_MASK_32_BIT;\n+\t\t\tif (intr_en)\n+\t\t\t\tdesc->control.transfer_irq_en = 1;\n+\t\t\tsend_descriptor(ctx, desc);\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int issue_magic(struct dma_afu_ctx *ctx)\n+{\n+\t*(ctx->magic_buf) = 0ULL;\n+\treturn do_dma(ctx, DMA_WF_HOST_ADDR(ctx->magic_iova),\n+\t\tDMA_WF_MAGIC_ROM, 64, 1, FPGA_TO_HOST, 1);\n+}\n+\n+static void wait_magic(struct dma_afu_ctx *ctx)\n+{\n+\tint magic_timeout = 0;\n+\n+\tif (!ctx)\n+\t\treturn;\n+\n+\tpoll_interrupt(ctx);\n+\twhile (*(ctx->magic_buf) != DMA_WF_MAGIC) {\n+\t\tif (magic_timeout++ > 1000) {\n+\t\t\tIFPGA_RAWDEV_PMD_ERR(\"DMA magic operation timeout\");\n+\t\t\tmagic_timeout = 0;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\t*(ctx->magic_buf) = 0ULL;\n+}\n+\n+static int dma_tx_buf(struct dma_afu_ctx *ctx, uint64_t dst, uint64_t src,\n+\tuint64_t chunk, int is_last_chunk, int *intr_issued)\n+{\n+\tint intr_en = 0;\n+\tint ret = 0;\n+\n+\tif (!ctx || !intr_issued)\n+\t\treturn -EINVAL;\n+\n+\tsrc += chunk * ctx->dma_buf_size;\n+\tdst += chunk * ctx->dma_buf_size;\n+\n+\tif (((chunk % HALF_DMA_BUF) == (HALF_DMA_BUF - 1)) || is_last_chunk) {\n+\t\tif (*intr_issued) {\n+\t\t\tret = poll_interrupt(ctx);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t}\n+\t\tintr_en = 1;\n+\t}\n+\n+\tchunk %= NUM_DMA_BUF;\n+\trte_memcpy(ctx->dma_buf[chunk], (void 
*)(uintptr_t)src,\n+\t\tctx->dma_buf_size);\n+\tret = do_dma(ctx, dst, DMA_HOST_ADDR(ctx->dma_iova[chunk]),\n+\t\t\tctx->dma_buf_size, 0, HOST_TO_FPGA, intr_en);\n+\tif (intr_en)\n+\t\t*intr_issued = 1;\n+\n+\treturn ret;\n+}\n+\n+static int dma_host_to_fpga(struct dma_afu_ctx *ctx, uint64_t dst, uint64_t src,\n+\tsize_t count)\n+{\n+\tuint64_t i = 0;\n+\tuint64_t count_left = count;\n+\tuint64_t aligned_addr = 0;\n+\tuint64_t align_bytes = 0;\n+\tuint64_t dma_chunks = 0;\n+\tuint64_t dma_tx_bytes = 0;\n+\tuint64_t offset = 0;\n+\tint issued_intr = 0;\n+\tint ret = 0;\n+\n+\tIFPGA_RAWDEV_PMD_DEBUG(\"0x%\"PRIx64\" ---> 0x%\"PRIx64\" (%zu)\", src, dst,\n+\t\tcount);\n+\n+\tif (!ctx)\n+\t\treturn -EINVAL;\n+\n+\tif (!IS_DMA_ALIGNED(dst)) {\n+\t\tif (count_left < DMA_ALIGN_BYTES)\n+\t\t\treturn ase_host_to_fpga(ctx, &dst, &src, count_left);\n+\n+\t\taligned_addr = ((dst / DMA_ALIGN_BYTES) + 1)\n+\t\t\t* DMA_ALIGN_BYTES;\n+\t\talign_bytes = aligned_addr - dst;\n+\t\tret = ase_host_to_fpga(ctx, &dst, &src, align_bytes);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\tcount_left = count_left - align_bytes;\n+\t}\n+\n+\tif (count_left) {\n+\t\tdma_chunks = count_left / ctx->dma_buf_size;\n+\t\toffset = dma_chunks * ctx->dma_buf_size;\n+\t\tcount_left -= offset;\n+\t\tIFPGA_RAWDEV_PMD_DEBUG(\"0x%\"PRIx64\" ---> 0x%\"PRIx64\n+\t\t\t\" (%\"PRIu64\"...0x%\"PRIx64\")\",\n+\t\t\tsrc, dst, dma_chunks, count_left);\n+\t\tfor (i = 0; i < dma_chunks; i++) {\n+\t\t\tret = dma_tx_buf(ctx, dst, src, i,\n+\t\t\t\ti == (dma_chunks - 1), &issued_intr);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t}\n+\n+\t\tif (issued_intr) {\n+\t\t\tret = poll_interrupt(ctx);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t}\n+\n+\t\tif (count_left) {\n+\t\t\ti = count_left / DMA_ALIGN_BYTES;\n+\t\t\tif (i > 0) {\n+\t\t\t\tdma_tx_bytes = i * DMA_ALIGN_BYTES;\n+\t\t\t\tIFPGA_RAWDEV_PMD_DEBUG(\"left over 0x%\"PRIx64\" to DMA\",\n+\t\t\t\t\tdma_tx_bytes);\n+\t\t\t\trte_memcpy(ctx->dma_buf[0],\n+\t\t\t\t\t(void *)(uintptr_t)(src + offset),\n+\t\t\t\t\tdma_tx_bytes);\n+\t\t\t\tret = do_dma(ctx, dst + offset,\n+\t\t\t\t\tDMA_HOST_ADDR(ctx->dma_iova[0]),\n+\t\t\t\t\tdma_tx_bytes, 1, HOST_TO_FPGA, 1);\n+\t\t\t\tif (ret)\n+\t\t\t\t\treturn ret;\n+\t\t\t\tret = poll_interrupt(ctx);\n+\t\t\t\tif (ret)\n+\t\t\t\t\treturn ret;\n+\t\t\t}\n+\n+\t\t\tcount_left -= dma_tx_bytes;\n+\t\t\tif (count_left) {\n+\t\t\t\tIFPGA_RAWDEV_PMD_DEBUG(\"left over 0x%\"PRIx64\" to ASE\",\n+\t\t\t\t\tcount_left);\n+\t\t\t\tdst += offset + dma_tx_bytes;\n+\t\t\t\tsrc += offset + dma_tx_bytes;\n+\t\t\t\tret = ase_host_to_fpga(ctx, &dst, &src,\n+\t\t\t\t\tcount_left);\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int dma_rx_buf(struct dma_afu_ctx *ctx, uint64_t dst, uint64_t src,\n+\tuint64_t chunk, int is_last_chunk, uint64_t *rx_count, int *wf_issued)\n+{\n+\tuint64_t i = chunk % NUM_DMA_BUF;\n+\tuint64_t n = *rx_count;\n+\tuint64_t num_pending = 0;\n+\tint ret = 0;\n+\n+\tif (!ctx || !wf_issued)\n+\t\treturn -EINVAL;\n+\n+\tret = do_dma(ctx, DMA_HOST_ADDR(ctx->dma_iova[i]),\n+\t\tsrc + chunk * ctx->dma_buf_size,\n+\t\tctx->dma_buf_size, 1, FPGA_TO_HOST, 0);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tnum_pending = chunk - n + 1;\n+\tif (num_pending == HALF_DMA_BUF) {\n+\t\tret = issue_magic(ctx);\n+\t\tif (ret) {\n+\t\t\tIFPGA_RAWDEV_PMD_DEBUG(\"Magic issue failed\");\n+\t\t\treturn ret;\n+\t\t}\n+\t\t*wf_issued = 1;\n+\t}\n+\n+\tif ((num_pending > (NUM_DMA_BUF - 1)) || is_last_chunk) {\n+\t\tif (*wf_issued) {\n+\t\t\twait_magic(ctx);\n+\t\t\tfor (i = 0; i < 
HALF_DMA_BUF; i++) {\n+\t\t\t\trte_memcpy((void *)(uintptr_t)(dst +\n+\t\t\t\t\t\tn * ctx->dma_buf_size),\n+\t\t\t\t\tctx->dma_buf[n % NUM_DMA_BUF],\n+\t\t\t\t\tctx->dma_buf_size);\n+\t\t\t\tn++;\n+\t\t\t}\n+\t\t\t*wf_issued = 0;\n+\t\t\t*rx_count = n;\n+\t\t}\n+\t\tret = issue_magic(ctx);\n+\t\tif (ret) {\n+\t\t\tIFPGA_RAWDEV_PMD_DEBUG(\"Magic issue failed\");\n+\t\t\treturn ret;\n+\t\t}\n+\t\t*wf_issued = 1;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int dma_fpga_to_host(struct dma_afu_ctx *ctx, uint64_t dst, uint64_t src,\n+\tsize_t count)\n+{\n+\tuint64_t i = 0;\n+\tuint64_t count_left = count;\n+\tuint64_t aligned_addr = 0;\n+\tuint64_t align_bytes = 0;\n+\tuint64_t dma_chunks = 0;\n+\tuint64_t pending_buf = 0;\n+\tuint64_t dma_rx_bytes = 0;\n+\tuint64_t offset = 0;\n+\tint wf_issued = 0;\n+\tint ret = 0;\n+\n+\tIFPGA_RAWDEV_PMD_DEBUG(\"0x%\"PRIx64\" ---> 0x%\"PRIx64\" (%zu)\", src, dst,\n+\t\tcount);\n+\n+\tif (!ctx)\n+\t\treturn -EINVAL;\n+\n+\tif (!IS_DMA_ALIGNED(src)) {\n+\t\tif (count_left < DMA_ALIGN_BYTES)\n+\t\t\treturn ase_fpga_to_host(ctx, &src, &dst, count_left);\n+\n+\t\taligned_addr = ((src / DMA_ALIGN_BYTES) + 1)\n+\t\t\t * DMA_ALIGN_BYTES;\n+\t\talign_bytes = aligned_addr - src;\n+\t\tret = ase_fpga_to_host(ctx, &src, &dst, align_bytes);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\tcount_left = count_left - align_bytes;\n+\t}\n+\n+\tif (count_left) {\n+\t\tdma_chunks = count_left / ctx->dma_buf_size;\n+\t\toffset = dma_chunks * ctx->dma_buf_size;\n+\t\tcount_left -= offset;\n+\t\tIFPGA_RAWDEV_PMD_DEBUG(\"0x%\"PRIx64\" ---> 0x%\"PRIx64\n+\t\t\t\" (%\"PRIu64\"...0x%\"PRIx64\")\",\n+\t\t\tsrc, dst, dma_chunks, count_left);\n+\t\tfor (i = 0; i < dma_chunks; i++) {\n+\t\t\tret = dma_rx_buf(ctx, dst, src, i,\n+\t\t\t\ti == (dma_chunks - 1),\n+\t\t\t\t&pending_buf, &wf_issued);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t}\n+\n+\t\tif (wf_issued)\n+\t\t\twait_magic(ctx);\n+\n+\t\t/* clear out final dma memcpy operations */\n+\t\twhile (pending_buf < dma_chunks) {\n+\t\t\t/* constant size transfer; no length check required */\n+\t\t\trte_memcpy((void *)(uintptr_t)(dst +\n+\t\t\t\t\tpending_buf * ctx->dma_buf_size),\n+\t\t\t\tctx->dma_buf[pending_buf % NUM_DMA_BUF],\n+\t\t\t\tctx->dma_buf_size);\n+\t\t\tpending_buf++;\n+\t\t}\n+\n+\t\tif (count_left > 0) {\n+\t\t\ti = count_left / DMA_ALIGN_BYTES;\n+\t\t\tif (i > 0) {\n+\t\t\t\tdma_rx_bytes = i * DMA_ALIGN_BYTES;\n+\t\t\t\tIFPGA_RAWDEV_PMD_DEBUG(\"left over 0x%\"PRIx64\" to DMA\",\n+\t\t\t\t\tdma_rx_bytes);\n+\t\t\t\tret = do_dma(ctx,\n+\t\t\t\t\tDMA_HOST_ADDR(ctx->dma_iova[0]),\n+\t\t\t\t\tsrc + offset,\n+\t\t\t\t\tdma_rx_bytes, 1, FPGA_TO_HOST, 0);\n+\t\t\t\tif (ret)\n+\t\t\t\t\treturn ret;\n+\t\t\t\tret = issue_magic(ctx);\n+\t\t\t\tif (ret)\n+\t\t\t\t\treturn ret;\n+\t\t\t\twait_magic(ctx);\n+\t\t\t\trte_memcpy((void *)(uintptr_t)(dst + offset),\n+\t\t\t\t\tctx->dma_buf[0], dma_rx_bytes);\n+\t\t\t}\n+\n+\t\t\tcount_left -= dma_rx_bytes;\n+\t\t\tif (count_left) {\n+\t\t\t\tIFPGA_RAWDEV_PMD_DEBUG(\"left over 0x%\"PRIx64\" to ASE\",\n+\t\t\t\t\tcount_left);\n+\t\t\t\tdst += offset + dma_rx_bytes;\n+\t\t\t\tsrc += offset + dma_rx_bytes;\n+\t\t\t\tret = ase_fpga_to_host(ctx, &src, &dst,\n+\t\t\t\t\t\t\tcount_left);\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int dma_fpga_to_fpga(struct dma_afu_ctx *ctx, uint64_t dst, uint64_t src,\n+\tsize_t count)\n+{\n+\tuint64_t i = 0;\n+\tuint64_t count_left = count;\n+\tuint64_t dma_chunks = 0;\n+\tuint64_t offset = 0;\n+\tuint32_t tx_chunks = 0;\n+\tuint64_t *tmp_buf = 
NULL;\n+\tint ret = 0;\n+\n+\tIFPGA_RAWDEV_PMD_DEBUG(\"0x%\"PRIx64\" ---> 0x%\"PRIx64\" (%zu)\", src, dst,\n+\t\tcount);\n+\n+\tif (!ctx)\n+\t\treturn -EINVAL;\n+\n+\tif (IS_DMA_ALIGNED(dst) && IS_DMA_ALIGNED(src)\n+\t    && IS_DMA_ALIGNED(count_left)) {\n+\t\tdma_chunks = count_left / ctx->dma_buf_size;\n+\t\toffset = dma_chunks * ctx->dma_buf_size;\n+\t\tcount_left -= offset;\n+\t\tIFPGA_RAWDEV_PMD_DEBUG(\"0x%\"PRIx64\" ---> 0x%\"PRIx64\n+\t\t\t\" (%\"PRIu64\"...0x%\"PRIx64\")\",\n+\t\t\tsrc, dst, dma_chunks, count_left);\n+\t\tfor (i = 0; i < dma_chunks; i++) {\n+\t\t\tret = do_dma(ctx, dst + i * ctx->dma_buf_size,\n+\t\t\t\tsrc + i * ctx->dma_buf_size,\n+\t\t\t\tctx->dma_buf_size, 0, FPGA_TO_FPGA, 0);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t\tif ((((i + 1) % NUM_DMA_BUF) == 0) ||\n+\t\t\t\t(i == (dma_chunks - 1))) {\n+\t\t\t\tret = issue_magic(ctx);\n+\t\t\t\tif (ret)\n+\t\t\t\t\treturn ret;\n+\t\t\t\twait_magic(ctx);\n+\t\t\t}\n+\t\t}\n+\n+\t\tif (count_left > 0) {\n+\t\t\tIFPGA_RAWDEV_PMD_DEBUG(\"left over 0x%\"PRIx64\" to DMA\", count_left);\n+\t\t\tret = do_dma(ctx, dst + offset, src + offset,\n+\t\t\t\tcount_left, 1, FPGA_TO_FPGA, 0);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t\tret = issue_magic(ctx);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t\twait_magic(ctx);\n+\t\t}\n+\t} else {\n+\t\tif ((src < dst) && (src + count_left > dst)) {\n+\t\t\tIFPGA_RAWDEV_PMD_ERR(\"Overlapping: 0x%\"PRIx64\n+\t\t\t\t\" -> 0x%\"PRIx64\" (0x%\"PRIx64\")\",\n+\t\t\t\tsrc, dst, count_left);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\ttx_chunks = count_left / ctx->dma_buf_size;\n+\t\toffset = tx_chunks * ctx->dma_buf_size;\n+\t\tcount_left -= offset;\n+\t\tIFPGA_RAWDEV_PMD_DEBUG(\"0x%\"PRIx64\" --> 0x%\"PRIx64\n+\t\t\t\" (%u...0x%\"PRIx64\")\",\n+\t\t\tsrc, dst, tx_chunks, count_left);\n+\t\ttmp_buf = (uint64_t *)rte_malloc(NULL, ctx->dma_buf_size,\n+\t\t\tDMA_ALIGN_BYTES);\n+\t\tfor (i = 0; i < tx_chunks; i++) {\n+\t\t\tret = dma_fpga_to_host(ctx, (uint64_t)tmp_buf,\n+\t\t\t\tsrc + i * ctx->dma_buf_size,\n+\t\t\t\tctx->dma_buf_size);\n+\t\t\tif (ret)\n+\t\t\t\tgoto free_buf;\n+\t\t\tret = dma_host_to_fpga(ctx,\n+\t\t\t\tdst + i * ctx->dma_buf_size,\n+\t\t\t\t(uint64_t)tmp_buf, ctx->dma_buf_size);\n+\t\t\tif (ret)\n+\t\t\t\tgoto free_buf;\n+\t\t}\n+\n+\t\tif (count_left > 0) {\n+\t\t\tret = dma_fpga_to_host(ctx, (uint64_t)tmp_buf,\n+\t\t\t\tsrc + offset, count_left);\n+\t\t\tif (ret)\n+\t\t\t\tgoto free_buf;\n+\t\t\tret = dma_host_to_fpga(ctx, dst + offset,\n+\t\t\t\t(uint64_t)tmp_buf, count_left);\n+\t\t\tif (ret)\n+\t\t\t\tgoto free_buf;\n+\t\t}\n+free_buf:\n+\t\trte_free(tmp_buf);\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int dma_transfer_sync(struct dma_afu_ctx *ctx, uint64_t dst,\n+\tuint64_t src, size_t count, fpga_dma_type type)\n+{\n+\tint ret = 0;\n+\n+\tif (!ctx)\n+\t\treturn -EINVAL;\n+\n+\tif (type == HOST_TO_FPGA)\n+\t\tret = dma_host_to_fpga(ctx, dst, src, count);\n+\telse if (type == FPGA_TO_HOST)\n+\t\tret = dma_fpga_to_host(ctx, dst, src, count);\n+\telse if (type == FPGA_TO_FPGA)\n+\t\tret = dma_fpga_to_fpga(ctx, dst, src, count);\n+\telse\n+\t\treturn -EINVAL;\n+\n+\treturn ret;\n+}\n+\n+static double get_duration(struct timespec start, struct timespec end)\n+{\n+\tuint64_t diff = 1000000000L * (end.tv_sec - start.tv_sec)\n+\t\t+ end.tv_nsec - start.tv_nsec;\n+\treturn (double)diff / (double)1000000000L;\n+}\n+\n+#define SWEEP_ITERS 1\n+static int sweep_test(struct dma_afu_ctx *ctx, uint32_t length,\n+\tuint64_t ddr_offset, uint64_t buf_offset, uint64_t 
size_decrement)\n+{\n+\tstruct timespec start, end;\n+\tuint64_t test_size = 0;\n+\tuint64_t *dma_buf_ptr = NULL;\n+\tdouble throughput, total_time = 0.0;\n+\tint i = 0;\n+\tint ret = 0;\n+\n+\tif (!ctx || !ctx->data_buf || !ctx->ref_buf) {\n+\t\tIFPGA_RAWDEV_PMD_ERR(\"Buffer for DMA test is not allocated\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (length < (buf_offset + size_decrement)) {\n+\t\tIFPGA_RAWDEV_PMD_ERR(\"Test length does not match unaligned parameter\");\n+\t\treturn -EINVAL;\n+\t}\n+\ttest_size = length - (buf_offset + size_decrement);\n+\tif ((ddr_offset + test_size) > ctx->mem_size) {\n+\t\tIFPGA_RAWDEV_PMD_ERR(\"Test is out of DDR memory space\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tdma_buf_ptr = (uint64_t *)((uint8_t *)ctx->data_buf + buf_offset);\n+\tprintf(\"Sweep Host %p to FPGA 0x%\"PRIx64\n+\t\t\" with 0x%\"PRIx64\" bytes ...\\n\",\n+\t\t(void *)dma_buf_ptr, ddr_offset, test_size);\n+\n+\tfor (i = 0; i < SWEEP_ITERS; i++) {\n+\t\tclock_gettime(CLOCK_MONOTONIC, &start);\n+\t\tret = dma_transfer_sync(ctx, ddr_offset, (uint64_t)dma_buf_ptr,\n+\t\t\ttest_size, HOST_TO_FPGA);\n+\t\tclock_gettime(CLOCK_MONOTONIC, &end);\n+\t\tif (ret) {\n+\t\t\tIFPGA_RAWDEV_PMD_ERR(\"Failed\");\n+\t\t\treturn ret;\n+\t\t}\n+\t\ttotal_time += get_duration(start, end);\n+\t}\n+\tthroughput = (test_size * SWEEP_ITERS) / (total_time * 1000000);\n+\tprintf(\"Measured bandwidth = %lf MB/s\\n\", throughput);\n+\n+\tprintf(\"Sweep FPGA 0x%\"PRIx64\" to Host %p with 0x%\"PRIx64\" bytes ...\\n\",\n+\t\tddr_offset, (void *)dma_buf_ptr, test_size);\n+\n+\ttotal_time = 0.0;\n+\tmemset((char *)dma_buf_ptr, 0, test_size);\n+\tfor (i = 0; i < SWEEP_ITERS; i++) {\n+\t\tclock_gettime(CLOCK_MONOTONIC, &start);\n+\t\tret = dma_transfer_sync(ctx, (uint64_t)dma_buf_ptr, ddr_offset,\n+\t\t\ttest_size, FPGA_TO_HOST);\n+\t\tclock_gettime(CLOCK_MONOTONIC, &end);\n+\t\tif (ret) {\n+\t\t\tIFPGA_RAWDEV_PMD_ERR(\"Failed\");\n+\t\t\treturn ret;\n+\t\t}\n+\t\ttotal_time += get_duration(start, end);\n+\t}\n+\tthroughput = (test_size * SWEEP_ITERS) / (total_time * 1000000);\n+\tprintf(\"Measured bandwidth = %lf MB/s\\n\", throughput);\n+\n+\tprintf(\"Verifying buffer ...\\n\");\n+\treturn dma_afu_buf_verify(ctx, test_size);\n+}\n+\n+static int dma_afu_test(struct afu_rawdev *dev)\n+{\n+\tstruct n3000_afu_priv *priv = NULL;\n+\tstruct dma_afu_ctx *ctx = NULL;\n+\tstruct rte_pmd_afu_dma_cfg *cfg = NULL;\n+\tmsgdma_ctrl ctrl;\n+\tuint64_t offset = 0;\n+\tuint32_t i = 0;\n+\tint ret = 0;\n+\n+\tif (!dev)\n+\t\treturn -EINVAL;\n+\n+\tif (!dev->priv)\n+\t\treturn -ENOENT;\n+\n+\tpriv = (struct n3000_afu_priv *)dev->priv;\n+\tcfg = &priv->dma_cfg;\n+\tif (cfg->index >= NUM_N3000_DMA)\n+\t\treturn -EINVAL;\n+\tctx = &priv->dma_ctx[cfg->index];\n+\n+\tctx->pattern = (int)cfg->pattern;\n+\tctx->verbose = (int)cfg->verbose;\n+\tctx->dma_buf_size = cfg->size;\n+\n+\tret = dma_afu_buf_alloc(ctx, cfg);\n+\tif (ret)\n+\t\tgoto free;\n+\n+\tprintf(\"Initialize test buffer\\n\");\n+\tdma_afu_buf_init(ctx, cfg->length);\n+\n+\t/* enable interrupt */\n+\tctrl.csr = 0;\n+\tctrl.global_intr_en_mask = 1;\n+\trte_write32(ctrl.csr, CSR_CONTROL(ctx->csr_addr));\n+\n+\tprintf(\"Host %p to FPGA 0x%x with 0x%x bytes\\n\", ctx->data_buf,\n+\t\tcfg->offset, cfg->length);\n+\tret = dma_transfer_sync(ctx, cfg->offset, (uint64_t)ctx->data_buf,\n+\t\tcfg->length, HOST_TO_FPGA);\n+\tif (ret) {\n+\t\tIFPGA_RAWDEV_PMD_ERR(\"Failed to transfer data from host to FPGA\");\n+\t\tgoto end;\n+\t}\n+\tmemset(ctx->data_buf, 0, cfg->length);\n+\n+\tprintf(\"FPGA 0x%x 
to Host %p with 0x%x bytes\\n\", cfg->offset,\n+\t\tctx->data_buf, cfg->length);\n+\tret = dma_transfer_sync(ctx, (uint64_t)ctx->data_buf, cfg->offset,\n+\t\tcfg->length, FPGA_TO_HOST);\n+\tif (ret) {\n+\t\tIFPGA_RAWDEV_PMD_ERR(\"Failed to transfer data from FPGA to host\");\n+\t\tgoto end;\n+\t}\n+\tret = dma_afu_buf_verify(ctx, cfg->length);\n+\tif (ret)\n+\t\tgoto end;\n+\n+\tif ((cfg->offset + cfg->length * 2) <= ctx->mem_size)\n+\t\toffset = cfg->offset + cfg->length;\n+\telse if (cfg->offset > cfg->length)\n+\t\toffset = 0;\n+\telse\n+\t\tgoto end;\n+\n+\tprintf(\"FPGA 0x%x to FPGA 0x%\"PRIx64\" with 0x%x bytes\\n\",\n+\t\tcfg->offset, offset, cfg->length);\n+\tret = dma_transfer_sync(ctx, offset, cfg->offset, cfg->length,\n+\t\tFPGA_TO_FPGA);\n+\tif (ret) {\n+\t\tIFPGA_RAWDEV_PMD_ERR(\"Failed to transfer data from FPGA to FPGA\");\n+\t\tgoto end;\n+\t}\n+\n+\tprintf(\"FPGA 0x%\"PRIx64\" to Host %p with 0x%x bytes\\n\", offset,\n+\t\tctx->data_buf, cfg->length);\n+\tret = dma_transfer_sync(ctx, (uint64_t)ctx->data_buf, offset,\n+\t\tcfg->length, FPGA_TO_HOST);\n+\tif (ret) {\n+\t\tIFPGA_RAWDEV_PMD_ERR(\"Failed to transfer data from FPGA to host\");\n+\t\tgoto end;\n+\t}\n+\tret = dma_afu_buf_verify(ctx, cfg->length);\n+\tif (ret)\n+\t\tgoto end;\n+\n+\tprintf(\"Sweep with aligned address and size\\n\");\n+\tret = sweep_test(ctx, cfg->length, cfg->offset, 0, 0);\n+\tif (ret)\n+\t\tgoto end;\n+\n+\tif (cfg->unaligned) {\n+\t\tprintf(\"Sweep with unaligned address and size\\n\");\n+\t\tstruct unaligned_set {\n+\t\t\tuint64_t addr_offset;\n+\t\t\tuint64_t size_dec;\n+\t\t} param[] = {{61, 5}, {3, 0}, {7, 3}, {0, 3}, {0, 61}, {0, 7}};\n+\t\tfor (i = 0; i < ARRAY_SIZE(param); i++) {\n+\t\t\tret = sweep_test(ctx, cfg->length, cfg->offset,\n+\t\t\t\tparam[i].addr_offset, param[i].size_dec);\n+\t\t\tif (ret)\n+\t\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+end:\n+\t/* disable interrupt */\n+\tctrl.global_intr_en_mask = 0;\n+\trte_write32(ctrl.csr, CSR_CONTROL(ctx->csr_addr));\n+\n+free:\n+\tdma_afu_buf_free(ctx);\n+\treturn ret;\n+}\n+\n+static struct rte_pci_device *n3000_afu_get_pci_dev(struct afu_rawdev *dev)\n+{\n+\tstruct rte_afu_device *afudev = NULL;\n+\n+\tif (!dev || !dev->rawdev || !dev->rawdev->device)\n+\t\treturn NULL;\n+\n+\tafudev = RTE_DEV_TO_AFU(dev->rawdev->device);\n+\tif (!afudev->rawdev || !afudev->rawdev->device)\n+\t\treturn NULL;\n+\n+\treturn RTE_DEV_TO_PCI(afudev->rawdev->device);\n+}\n+\n+#ifdef VFIO_PRESENT\n+static int dma_afu_set_irqs(struct afu_rawdev *dev, uint32_t vec_start,\n+\tuint32_t count, int *efds)\n+{\n+\tstruct rte_pci_device *pci_dev = NULL;\n+\tstruct vfio_irq_set *irq_set = NULL;\n+\tint vfio_dev_fd = 0;\n+\tsize_t sz = 0;\n+\tint ret = 0;\n+\n+\tif (!dev || !efds || (count == 0) || (count > MAX_MSIX_VEC))\n+\t\treturn -EINVAL;\n+\n+\tpci_dev = n3000_afu_get_pci_dev(dev);\n+\tif (!pci_dev)\n+\t\treturn -ENODEV;\n+\tvfio_dev_fd = rte_intr_dev_fd_get(pci_dev->intr_handle);\n+\n+\tsz = sizeof(*irq_set) + sizeof(*efds) * count;\n+\tirq_set = rte_zmalloc(NULL, sz, 0);\n+\tif (!irq_set)\n+\t\treturn -ENOMEM;\n+\n+\tirq_set->argsz = (uint32_t)sz;\n+\tirq_set->count = count;\n+\tirq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |\n+\t\tVFIO_IRQ_SET_ACTION_TRIGGER;\n+\tirq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;\n+\tirq_set->start = vec_start;\n+\n+\trte_memcpy(&irq_set->data, efds, sizeof(*efds) * count);\n+\tret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);\n+\tif (ret)\n+\t\tIFPGA_RAWDEV_PMD_ERR(\"Error enabling MSI-X 
interrupts\\n\");\n+\n+\trte_free(irq_set);\n+\treturn ret;\n+}\n+#endif\n+\n+static void *n3000_afu_get_port_addr(struct afu_rawdev *dev)\n+{\n+\tstruct rte_pci_device *pci_dev = NULL;\n+\tuint8_t *addr = NULL;\n+\tuint64_t val = 0;\n+\tuint32_t bar = 0;\n+\n+\tpci_dev = n3000_afu_get_pci_dev(dev);\n+\tif (!pci_dev)\n+\t\treturn NULL;\n+\n+\taddr = (uint8_t *)pci_dev->mem_resource[0].addr;\n+\tval = rte_read64(addr + PORT_ATTR_REG(dev->port));\n+\tif (!PORT_IMPLEMENTED(val)) {\n+\t\tIFPGA_RAWDEV_PMD_INFO(\"FIU port %d is not implemented\", dev->port);\n+\t\treturn NULL;\n+\t}\n+\n+\tbar = PORT_BAR(val);\n+\tif (bar >= PCI_MAX_RESOURCE) {\n+\t\tIFPGA_RAWDEV_PMD_ERR(\"BAR index %u is out of limit\", bar);\n+\t\treturn NULL;\n+\t}\n+\n+\taddr = (uint8_t *)pci_dev->mem_resource[bar].addr + PORT_OFFSET(val);\n+\treturn addr;\n+}\n+\n+static int n3000_afu_get_irq_capability(struct afu_rawdev *dev,\n+\tuint32_t *vec_start, uint32_t *vec_count)\n+{\n+\tuint8_t *addr = NULL;\n+\tuint64_t val = 0;\n+\tuint64_t header = 0;\n+\tuint64_t next_offset = 0;\n+\n+\taddr = (uint8_t *)n3000_afu_get_port_addr(dev);\n+\tif (!addr)\n+\t\treturn -ENOENT;\n+\n+\tdo {\n+\t\taddr += next_offset;\n+\t\theader = rte_read64(addr);\n+\t\tif ((DFH_TYPE(header) == DFH_TYPE_PRIVATE) &&\n+\t\t\t(DFH_FEATURE_ID(header) == PORT_FEATURE_UINT_ID)) {\n+\t\t\tval = rte_read64(addr + PORT_UINT_CAP_REG);\n+\t\t\tif (vec_start)\n+\t\t\t\t*vec_start = PORT_VEC_START(val);\n+\t\t\tif (vec_count)\n+\t\t\t\t*vec_count = PORT_VEC_COUNT(val);\n+\t\t\treturn 0;\n+\t\t}\n+\t\tnext_offset = DFH_NEXT_OFFSET(header);\n+\t\tif (((next_offset & 0xffff) == 0xffff) || (next_offset == 0))\n+\t\t\tbreak;\n+\t} while (!DFH_EOL(header));\n+\n+\treturn -ENOENT;\n+}\n+\n+static int nlb_afu_ctx_release(struct afu_rawdev *dev)\n+{\n+\tstruct n3000_afu_priv *priv = NULL;\n+\tstruct nlb_afu_ctx *ctx = NULL;\n+\n+\tif (!dev)\n+\t\treturn -EINVAL;\n+\n+\tpriv = (struct n3000_afu_priv *)dev->priv;\n+\tif (!priv)\n+\t\treturn -ENOENT;\n+\n+\tctx = &priv->nlb_ctx;\n+\n+\trte_free(ctx->dsm_ptr);\n+\tctx->dsm_ptr = NULL;\n+\tctx->status_ptr = NULL;\n+\n+\trte_free(ctx->src_ptr);\n+\tctx->src_ptr = NULL;\n+\n+\trte_free(ctx->dest_ptr);\n+\tctx->dest_ptr = NULL;\n+\n+\treturn 0;\n+}\n+\n+static int nlb_afu_ctx_init(struct afu_rawdev *dev, uint8_t *addr)\n+{\n+\tstruct n3000_afu_priv *priv = NULL;\n+\tstruct nlb_afu_ctx *ctx = NULL;\n+\tint ret = 0;\n+\n+\tif (!dev || !addr)\n+\t\treturn -EINVAL;\n+\n+\tpriv = (struct n3000_afu_priv *)dev->priv;\n+\tif (!priv)\n+\t\treturn -ENOENT;\n+\n+\tctx = &priv->nlb_ctx;\n+\tctx->addr = addr;\n+\n+\tctx->dsm_ptr = (uint8_t *)rte_zmalloc(NULL, DSM_SIZE, TEST_MEM_ALIGN);\n+\tif (!ctx->dsm_ptr)\n+\t\treturn -ENOMEM;\n+\n+\tctx->dsm_iova = rte_malloc_virt2iova(ctx->dsm_ptr);\n+\tif (ctx->dsm_iova == RTE_BAD_IOVA) {\n+\t\tret = -ENOMEM;\n+\t\tgoto release_dsm;\n+\t}\n+\n+\tctx->src_ptr = (uint8_t *)rte_zmalloc(NULL, NLB_BUF_SIZE,\n+\t\tTEST_MEM_ALIGN);\n+\tif (!ctx->src_ptr) {\n+\t\tret = -ENOMEM;\n+\t\tgoto release_dsm;\n+\t}\n+\tctx->src_iova = rte_malloc_virt2iova(ctx->src_ptr);\n+\tif (ctx->src_iova == RTE_BAD_IOVA) {\n+\t\tret = -ENOMEM;\n+\t\tgoto release_src;\n+\t}\n+\n+\tctx->dest_ptr = (uint8_t *)rte_zmalloc(NULL, NLB_BUF_SIZE,\n+\t\tTEST_MEM_ALIGN);\n+\tif (!ctx->dest_ptr) {\n+\t\tret = -ENOMEM;\n+\t\tgoto release_src;\n+\t}\n+\tctx->dest_iova = rte_malloc_virt2iova(ctx->dest_ptr);\n+\tif (ctx->dest_iova == RTE_BAD_IOVA) {\n+\t\tret = -ENOMEM;\n+\t\tgoto release_dest;\n+\t}\n+\n+\tctx->status_ptr = (struct nlb_dsm_status 
*)(ctx->dsm_ptr + DSM_STATUS);\n+\treturn 0;\n+\n+release_dest:\n+\trte_free(ctx->dest_ptr);\n+\tctx->dest_ptr = NULL;\n+release_src:\n+\trte_free(ctx->src_ptr);\n+\tctx->src_ptr = NULL;\n+release_dsm:\n+\trte_free(ctx->dsm_ptr);\n+\tctx->dsm_ptr = NULL;\n+\treturn ret;\n+}\n+\n+static int dma_afu_ctx_release(struct afu_rawdev *dev)\n+{\n+\tstruct n3000_afu_priv *priv = NULL;\n+\tstruct dma_afu_ctx *ctx = NULL;\n+\n+\tif (!dev)\n+\t\treturn -EINVAL;\n+\n+\tpriv = (struct n3000_afu_priv *)dev->priv;\n+\tif (!priv)\n+\t\treturn -ENOENT;\n+\n+\tctx = &priv->dma_ctx[0];\n+\n+\trte_free(ctx->desc_buf);\n+\tctx->desc_buf = NULL;\n+\n+\trte_free(ctx->magic_buf);\n+\tctx->magic_buf = NULL;\n+\n+\tclose(ctx->event_fd);\n+\treturn 0;\n+}\n+\n+static int dma_afu_ctx_init(struct afu_rawdev *dev, int index, uint8_t *addr)\n+{\n+\tstruct n3000_afu_priv *priv = NULL;\n+\tstruct dma_afu_ctx *ctx = NULL;\n+\tuint64_t mem_sz[] = {0x100000000, 0x100000000, 0x40000000, 0x1000000};\n+\tstatic int efds[1] = {0};\n+\tuint32_t vec_start = 0;\n+\tint ret = 0;\n+\n+\tif (!dev || (index < 0) || (index >= NUM_N3000_DMA) || !addr)\n+\t\treturn -EINVAL;\n+\n+\tpriv = (struct n3000_afu_priv *)dev->priv;\n+\tif (!priv)\n+\t\treturn -ENOENT;\n+\n+\tctx = &priv->dma_ctx[index];\n+\tctx->index = index;\n+\tctx->addr = addr;\n+\tctx->csr_addr = addr + DMA_CSR;\n+\tctx->desc_addr = addr + DMA_DESC;\n+\tctx->ase_ctrl_addr = addr + DMA_ASE_CTRL;\n+\tctx->ase_data_addr = addr + DMA_ASE_DATA;\n+\tctx->mem_size = mem_sz[ctx->index];\n+\tctx->cur_ase_page = INVALID_ASE_PAGE;\n+\tif (ctx->index == 0) {\n+\t\tret = n3000_afu_get_irq_capability(dev, &vec_start, NULL);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\n+\t\tefds[0] = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);\n+\t\tif (efds[0] < 0) {\n+\t\t\tIFPGA_RAWDEV_PMD_ERR(\"eventfd create failed\");\n+\t\t\treturn -EBADF;\n+\t\t}\n+#ifdef VFIO_PRESENT\n+\t\tif (dma_afu_set_irqs(dev, vec_start, 1, efds))\n+\t\t\tIFPGA_RAWDEV_PMD_ERR(\"DMA interrupt setup failed\");\n+#endif\n+\t}\n+\tctx->event_fd = efds[0];\n+\n+\tctx->desc_buf = (msgdma_ext_desc *)rte_zmalloc(NULL,\n+\t\tsizeof(msgdma_ext_desc), DMA_ALIGN_BYTES);\n+\tif (!ctx->desc_buf) {\n+\t\tret = -ENOMEM;\n+\t\tgoto release;\n+\t}\n+\n+\tctx->magic_buf = (uint64_t *)rte_zmalloc(NULL, MAGIC_BUF_SIZE,\n+\t\tTEST_MEM_ALIGN);\n+\tif (!ctx->magic_buf) {\n+\t\tret = -ENOMEM;\n+\t\tgoto release;\n+\t}\n+\tctx->magic_iova = rte_malloc_virt2iova(ctx->magic_buf);\n+\tif (ctx->magic_iova == RTE_BAD_IOVA) {\n+\t\tret = -ENOMEM;\n+\t\tgoto release;\n+\t}\n+\n+\treturn 0;\n+\n+release:\n+\tdma_afu_ctx_release(dev);\n+\treturn ret;\n+}\n+\n+static int n3000_afu_ctx_init(struct afu_rawdev *dev)\n+{\n+\tstruct n3000_afu_priv *priv = NULL;\n+\tuint8_t *addr = NULL;\n+\tuint64_t header = 0;\n+\tuint64_t uuid_hi = 0;\n+\tuint64_t uuid_lo = 0;\n+\tuint64_t next_offset = 0;\n+\tint ret = 0;\n+\n+\tif (!dev)\n+\t\treturn -EINVAL;\n+\n+\tpriv = (struct n3000_afu_priv *)dev->priv;\n+\tif (!priv)\n+\t\treturn -ENOENT;\n+\n+\taddr = (uint8_t *)dev->addr;\n+\tdo {\n+\t\taddr += next_offset;\n+\t\theader = rte_read64(addr);\n+\t\tuuid_lo = rte_read64(addr + DFH_UUID_L_OFFSET);\n+\t\tuuid_hi = rte_read64(addr + DFH_UUID_H_OFFSET);\n+\n+\t\tif ((DFH_TYPE(header) == DFH_TYPE_AFU) &&\n+\t\t\t(uuid_lo == N3000_NLB0_UUID_L) &&\n+\t\t\t(uuid_hi == N3000_NLB0_UUID_H)) {\n+\t\t\tIFPGA_RAWDEV_PMD_INFO(\"AFU NLB0 found @ %p\", (void *)addr);\n+\t\t\tret = nlb_afu_ctx_init(dev, addr);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t} else if ((DFH_TYPE(header) == DFH_TYPE_BBB) 
&&\n+\t\t\t(uuid_lo == N3000_DMA_UUID_L) &&\n+\t\t\t(uuid_hi == N3000_DMA_UUID_H) &&\n+\t\t\t(priv->num_dma < NUM_N3000_DMA)) {\n+\t\t\tIFPGA_RAWDEV_PMD_INFO(\"AFU DMA%d found @ %p\",\n+\t\t\t\tpriv->num_dma, (void *)addr);\n+\t\t\tret = dma_afu_ctx_init(dev, priv->num_dma, addr);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t\tpriv->num_dma++;\n+\t\t} else {\n+\t\t\tIFPGA_RAWDEV_PMD_DEBUG(\"DFH: type %\"PRIu64\n+\t\t\t\t\", uuid %016\"PRIx64\"%016\"PRIx64,\n+\t\t\t\tDFH_TYPE(header), uuid_hi, uuid_lo);\n+\t\t}\n+\n+\t\tnext_offset = DFH_NEXT_OFFSET(header);\n+\t\tif (((next_offset & 0xffff) == 0xffff) || (next_offset == 0))\n+\t\t\tbreak;\n+\t} while (!DFH_EOL(header));\n+\n+\treturn 0;\n+}\n+\n+static int n3000_afu_init(struct afu_rawdev *dev)\n+{\n+\tif (!dev)\n+\t\treturn -EINVAL;\n+\n+\tif (!dev->priv) {\n+\t\tdev->priv = rte_zmalloc(NULL, sizeof(struct n3000_afu_priv), 0);\n+\t\tif (!dev->priv)\n+\t\t\treturn -ENOMEM;\n+\t}\n+\n+\treturn n3000_afu_ctx_init(dev);\n+}\n+\n+static int n3000_afu_config(struct afu_rawdev *dev, void *config,\n+\tsize_t config_size)\n+{\n+\tstruct n3000_afu_priv *priv = NULL;\n+\tstruct rte_pmd_afu_n3000_cfg *cfg = NULL;\n+\tint i = 0;\n+\tuint64_t top = 0;\n+\n+\tif (!dev || !config || !config_size)\n+\t\treturn -EINVAL;\n+\n+\tpriv = (struct n3000_afu_priv *)dev->priv;\n+\tif (!priv)\n+\t\treturn -ENOENT;\n+\n+\tif (config_size != sizeof(struct rte_pmd_afu_n3000_cfg))\n+\t\treturn -EINVAL;\n+\n+\tcfg = (struct rte_pmd_afu_n3000_cfg *)config;\n+\tif (cfg->type == RTE_PMD_AFU_N3000_NLB) {\n+\t\tif (cfg->nlb_cfg.mode != NLB_MODE_LPBK)\n+\t\t\treturn -EINVAL;\n+\t\tif ((cfg->nlb_cfg.read_vc > NLB_VC_RANDOM) ||\n+\t\t\t(cfg->nlb_cfg.write_vc > NLB_VC_RANDOM))\n+\t\t\treturn -EINVAL;\n+\t\tif (cfg->nlb_cfg.wrfence_vc > NLB_VC_VH1)\n+\t\t\treturn -EINVAL;\n+\t\tif (cfg->nlb_cfg.cache_hint > NLB_RDLINE_MIXED)\n+\t\t\treturn -EINVAL;\n+\t\tif (cfg->nlb_cfg.cache_policy > NLB_WRPUSH_I)\n+\t\t\treturn -EINVAL;\n+\t\tif ((cfg->nlb_cfg.multi_cl != 1) &&\n+\t\t\t(cfg->nlb_cfg.multi_cl != 2) &&\n+\t\t\t(cfg->nlb_cfg.multi_cl != 4))\n+\t\t\treturn -EINVAL;\n+\t\tif ((cfg->nlb_cfg.begin < MIN_CACHE_LINES) ||\n+\t\t\t(cfg->nlb_cfg.begin > MAX_CACHE_LINES))\n+\t\t\treturn -EINVAL;\n+\t\tif ((cfg->nlb_cfg.end < cfg->nlb_cfg.begin) ||\n+\t\t\t(cfg->nlb_cfg.end > MAX_CACHE_LINES))\n+\t\t\treturn -EINVAL;\n+\t\trte_memcpy(&priv->nlb_cfg, &cfg->nlb_cfg,\n+\t\t\tsizeof(struct rte_pmd_afu_nlb_cfg));\n+\t} else if (cfg->type == RTE_PMD_AFU_N3000_DMA) {\n+\t\tif (cfg->dma_cfg.index >= NUM_N3000_DMA)\n+\t\t\treturn -EINVAL;\n+\t\ti = cfg->dma_cfg.index;\n+\t\tif (cfg->dma_cfg.length > priv->dma_ctx[i].mem_size)\n+\t\t\treturn -EINVAL;\n+\t\tif (cfg->dma_cfg.offset >= priv->dma_ctx[i].mem_size)\n+\t\t\treturn -EINVAL;\n+\t\ttop = cfg->dma_cfg.length + cfg->dma_cfg.offset;\n+\t\tif ((top == 0) || (top > priv->dma_ctx[i].mem_size))\n+\t\t\treturn -EINVAL;\n+\t\tif (i == 3) {  /* QDR connected to DMA3 */\n+\t\t\tif (cfg->dma_cfg.length & 0x3f) {\n+\t\t\t\tcfg->dma_cfg.length &= ~0x3f;\n+\t\t\t\tIFPGA_RAWDEV_PMD_INFO(\"Round size to %x for QDR\",\n+\t\t\t\t\tcfg->dma_cfg.length);\n+\t\t\t}\n+\t\t}\n+\t\trte_memcpy(&priv->dma_cfg, &cfg->dma_cfg,\n+\t\t\tsizeof(struct rte_pmd_afu_dma_cfg));\n+\t} else {\n+\t\tIFPGA_RAWDEV_PMD_ERR(\"Invalid type of N3000 AFU\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tpriv->cfg_type = cfg->type;\n+\treturn 0;\n+}\n+\n+static int n3000_afu_test(struct afu_rawdev *dev)\n+{\n+\tstruct n3000_afu_priv *priv = NULL;\n+\tint ret = 0;\n+\n+\tif (!dev)\n+\t\treturn 
-EINVAL;\n+\n+\tif (!dev->priv)\n+\t\treturn -ENOENT;\n+\n+\tpriv = (struct n3000_afu_priv *)dev->priv;\n+\n+\tif (priv->cfg_type == RTE_PMD_AFU_N3000_NLB) {\n+\t\tIFPGA_RAWDEV_PMD_INFO(\"Test NLB\");\n+\t\tret = nlb_afu_test(dev);\n+\t} else if (priv->cfg_type == RTE_PMD_AFU_N3000_DMA) {\n+\t\tIFPGA_RAWDEV_PMD_INFO(\"Test DMA%u\", priv->dma_cfg.index);\n+\t\tret = dma_afu_test(dev);\n+\t} else {\n+\t\tIFPGA_RAWDEV_PMD_ERR(\"Please configure AFU before test\");\n+\t\tret = -EINVAL;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int n3000_afu_close(struct afu_rawdev *dev)\n+{\n+\tif (!dev)\n+\t\treturn -EINVAL;\n+\n+\tnlb_afu_ctx_release(dev);\n+\tdma_afu_ctx_release(dev);\n+\n+\trte_free(dev->priv);\n+\tdev->priv = NULL;\n+\n+\treturn 0;\n+}\n+\n+static int n3000_afu_dump(struct afu_rawdev *dev, FILE *f)\n+{\n+\tstruct n3000_afu_priv *priv = NULL;\n+\n+\tif (!dev)\n+\t\treturn -EINVAL;\n+\n+\tpriv = (struct n3000_afu_priv *)dev->priv;\n+\tif (!priv)\n+\t\treturn -ENOENT;\n+\n+\tif (!f)\n+\t\tf = stdout;\n+\n+\tif (priv->cfg_type == RTE_PMD_AFU_N3000_NLB) {\n+\t\tstruct nlb_afu_ctx *ctx = &priv->nlb_ctx;\n+\t\tfprintf(f, \"addr:\\t\\t%p\\n\", (void *)ctx->addr);\n+\t\tfprintf(f, \"dsm_ptr:\\t%p\\n\", (void *)ctx->dsm_ptr);\n+\t\tfprintf(f, \"dsm_iova:\\t0x%\"PRIx64\"\\n\", ctx->dsm_iova);\n+\t\tfprintf(f, \"src_ptr:\\t%p\\n\", (void *)ctx->src_ptr);\n+\t\tfprintf(f, \"src_iova:\\t0x%\"PRIx64\"\\n\", ctx->src_iova);\n+\t\tfprintf(f, \"dest_ptr:\\t%p\\n\", (void *)ctx->dest_ptr);\n+\t\tfprintf(f, \"dest_iova:\\t0x%\"PRIx64\"\\n\", ctx->dest_iova);\n+\t\tfprintf(f, \"status_ptr:\\t%p\\n\", (void *)ctx->status_ptr);\n+\t} else if (priv->cfg_type == RTE_PMD_AFU_N3000_DMA) {\n+\t\tstruct dma_afu_ctx *ctx = &priv->dma_ctx[priv->dma_cfg.index];\n+\t\tfprintf(f, \"index:\\t\\t%d\\n\", ctx->index);\n+\t\tfprintf(f, \"addr:\\t\\t%p\\n\", (void *)ctx->addr);\n+\t\tfprintf(f, \"csr_addr:\\t%p\\n\", (void *)ctx->csr_addr);\n+\t\tfprintf(f, \"desc_addr:\\t%p\\n\", (void *)ctx->desc_addr);\n+\t\tfprintf(f, \"ase_ctrl_addr:\\t%p\\n\", (void *)ctx->ase_ctrl_addr);\n+\t\tfprintf(f, \"ase_data_addr:\\t%p\\n\", (void *)ctx->ase_data_addr);\n+\t\tfprintf(f, \"desc_buf:\\t%p\\n\", (void *)ctx->desc_buf);\n+\t\tfprintf(f, \"magic_buf:\\t%p\\n\", (void *)ctx->magic_buf);\n+\t\tfprintf(f, \"magic_iova:\\t0x%\"PRIx64\"\\n\", ctx->magic_iova);\n+\t} else {\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int n3000_afu_reset(struct afu_rawdev *dev)\n+{\n+\tuint8_t *addr = NULL;\n+\tuint64_t val = 0;\n+\n+\taddr = (uint8_t *)n3000_afu_get_port_addr(dev);\n+\tif (!addr)\n+\t\treturn -ENOENT;\n+\n+\tval = rte_read64(addr + PORT_CTRL_REG);\n+\tval |= PORT_SOFT_RESET;\n+\trte_write64(val, addr + PORT_CTRL_REG);\n+\trte_delay_us(100);\n+\tval &= ~PORT_SOFT_RESET;\n+\trte_write64(val, addr + PORT_CTRL_REG);\n+\n+\treturn 0;\n+}\n+\n+static struct afu_ops n3000_afu_ops = {\n+\t.init = n3000_afu_init,\n+\t.config = n3000_afu_config,\n+\t.start = NULL,\n+\t.stop = NULL,\n+\t.test = n3000_afu_test,\n+\t.close = n3000_afu_close,\n+\t.dump = n3000_afu_dump,\n+\t.reset = n3000_afu_reset\n+};\n+\n+static struct afu_rawdev_drv n3000_afu_drv = {\n+\t.uuid = { N3000_AFU_UUID_L, N3000_AFU_UUID_H },\n+\t.ops = &n3000_afu_ops\n+};\n+\n+AFU_PMD_REGISTER(n3000_afu_drv);\ndiff --git a/drivers/raw/ifpga/afu_pmd_n3000.h b/drivers/raw/ifpga/afu_pmd_n3000.h\nnew file mode 100644\nindex 0000000..67e83fe\n--- /dev/null\n+++ b/drivers/raw/ifpga/afu_pmd_n3000.h\n@@ -0,0 +1,339 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 
Intel Corporation\n+ */\n+\n+#ifndef _AFU_PMD_N3000_H_\n+#define _AFU_PMD_N3000_H_\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+#include \"afu_pmd_core.h\"\n+#include \"rte_pmd_afu.h\"\n+\n+#define N3000_AFU_UUID_L  0xc000c9660d824272\n+#define N3000_AFU_UUID_H  0x9aeffe5f84570612\n+#define N3000_NLB0_UUID_L 0xf89e433683f9040b\n+#define N3000_NLB0_UUID_H 0xd8424dc4a4a3c413\n+#define N3000_DMA_UUID_L  0xa9149a35bace01ea\n+#define N3000_DMA_UUID_H  0xef82def7f6ec40fc\n+\n+#define NUM_N3000_DMA  4\n+#define MAX_MSIX_VEC   7\n+\n+/* N3000 DFL definition */\n+#define DFH_UUID_L_OFFSET  8\n+#define DFH_UUID_H_OFFSET  16\n+#define DFH_TYPE(hdr)  (((hdr) >> 60) & 0xf)\n+#define DFH_TYPE_AFU  1\n+#define DFH_TYPE_BBB  2\n+#define DFH_TYPE_PRIVATE  3\n+#define DFH_EOL(hdr)  (((hdr) >> 40) & 0x1)\n+#define DFH_NEXT_OFFSET(hdr)  (((hdr) >> 16) & 0xffffff)\n+#define DFH_FEATURE_ID(hdr)  ((hdr) & 0xfff)\n+#define PORT_ATTR_REG(n)  (((n) << 3) + 0x38)\n+#define PORT_IMPLEMENTED(attr)  (((attr) >> 60) & 0x1)\n+#define PORT_BAR(attr)  (((attr) >> 32) & 0x7)\n+#define PORT_OFFSET(attr)  ((attr) & 0xffffff)\n+#define PORT_FEATURE_UINT_ID  0x12\n+#define PORT_UINT_CAP_REG  0x8\n+#define PORT_VEC_START(cap)  (((cap) >> 12) & 0xfff)\n+#define PORT_VEC_COUNT(cap)  ((cap) >> 12 & 0xfff)\n+#define PORT_CTRL_REG  0x38\n+#define PORT_SOFT_RESET  (0x1 << 0)\n+\n+/* NLB registers definition */\n+#define CSR_SCRATCHPAD0    0x100\n+#define CSR_SCRATCHPAD1    0x108\n+#define CSR_AFU_DSM_BASEL  0x110\n+#define CSR_AFU_DSM_BASEH  0x114\n+#define CSR_SRC_ADDR       0x120\n+#define CSR_DST_ADDR       0x128\n+#define CSR_NUM_LINES      0x130\n+#define CSR_CTL            0x138\n+#define CSR_CFG            0x140\n+#define CSR_INACT_THRESH   0x148\n+#define CSR_INTERRUPT0     0x150\n+#define CSR_SWTEST_MSG     0x158\n+#define CSR_STATUS0        0x160\n+#define CSR_STATUS1        0x168\n+#define CSR_ERROR          0x170\n+#define CSR_STRIDE         0x178\n+#define CSR_HE_INFO0       0x180\n+\n+#define DSM_SIZE           0x200000\n+#define DSM_STATUS         0x40\n+#define DSM_POLL_INTERVAL  5  /* ms */\n+#define DSM_TIMEOUT        1000  /* ms */\n+\n+#define NLB_BUF_SIZE  0x400000\n+#define TEST_MEM_ALIGN  1024\n+\n+struct nlb_csr_ctl {\n+\tunion {\n+\t\tuint32_t csr;\n+\t\tstruct {\n+\t\t\tuint32_t reset:1;\n+\t\t\tuint32_t start:1;\n+\t\t\tuint32_t force_completion:1;\n+\t\t\tuint32_t reserved:29;\n+\t\t};\n+\t};\n+};\n+\n+struct nlb_csr_cfg {\n+\tunion {\n+\t\tuint32_t csr;\n+\t\tstruct {\n+\t\t\tuint32_t wrthru_en:1;\n+\t\t\tuint32_t cont:1;\n+\t\t\tuint32_t mode:3;\n+\t\t\tuint32_t multicl_len:2;\n+\t\t\tuint32_t rsvd1:1;\n+\t\t\tuint32_t delay_en:1;\n+\t\t\tuint32_t rdsel:2;\n+\t\t\tuint32_t rsvd2:1;\n+\t\t\tuint32_t chsel:3;\n+\t\t\tuint32_t rsvd3:1;\n+\t\t\tuint32_t wrpush_i:1;\n+\t\t\tuint32_t wr_chsel:3;\n+\t\t\tuint32_t rsvd4:3;\n+\t\t\tuint32_t test_cfg:5;\n+\t\t\tuint32_t interrupt_on_error:1;\n+\t\t\tuint32_t interrupt_testmode:1;\n+\t\t\tuint32_t wrfence_chsel:2;\n+\t\t};\n+\t};\n+};\n+\n+struct nlb_status0 {\n+\tunion {\n+\t\tuint64_t csr;\n+\t\tstruct {\n+\t\t\tuint32_t num_writes;\n+\t\t\tuint32_t num_reads;\n+\t\t};\n+\t};\n+};\n+\n+struct nlb_status1 {\n+\tunion {\n+\t\tuint64_t csr;\n+\t\tstruct {\n+\t\t\tuint32_t num_pend_writes;\n+\t\t\tuint32_t num_pend_reads;\n+\t\t};\n+\t};\n+};\n+\n+struct nlb_dsm_status {\n+\tuint32_t test_complete;\n+\tuint32_t test_error;\n+\tuint64_t num_clocks;\n+\tuint32_t num_reads;\n+\tuint32_t num_writes;\n+\tuint32_t start_overhead;\n+\tuint32_t 
end_overhead;\n+};\n+\n+/* DMA registers definition */\n+#define DMA_CSR       0x40\n+#define DMA_DESC      0x60\n+#define DMA_ASE_CTRL  0x200\n+#define DMA_ASE_DATA  0x1000\n+\n+#define DMA_ASE_WINDOW       4096\n+#define DMA_ASE_WINDOW_MASK  ((uint64_t)(DMA_ASE_WINDOW - 1))\n+#define INVALID_ASE_PAGE     0xffffffffffffffffULL\n+\n+#define DMA_WF_MAGIC             0x5772745F53796E63ULL\n+#define DMA_WF_MAGIC_ROM         0x1000000000000\n+#define DMA_HOST_ADDR(addr)      ((addr) | 0x2000000000000)\n+#define DMA_WF_HOST_ADDR(addr)   ((addr) | 0x3000000000000)\n+\n+#define NUM_DMA_BUF   8\n+#define HALF_DMA_BUF  (NUM_DMA_BUF / 2)\n+\n+#define DMA_MASK_32_BIT 0xFFFFFFFF\n+\n+#define DMA_CSR_BUSY           0x1\n+#define DMA_DESC_BUFFER_EMPTY  0x2\n+#define DMA_DESC_BUFFER_FULL   0x4\n+\n+#define DWORD_BYTES 4\n+#define IS_ALIGNED_DWORD(addr) (((addr) % DWORD_BYTES) == 0)\n+\n+#define QWORD_BYTES 8\n+#define IS_ALIGNED_QWORD(addr) (((addr) % QWORD_BYTES) == 0)\n+\n+#define DMA_ALIGN_BYTES 64\n+#define IS_DMA_ALIGNED(addr) (((addr) % DMA_ALIGN_BYTES) == 0)\n+\n+#define CCIP_ALIGN_BYTES (DMA_ALIGN_BYTES << 2)\n+\n+#define DMA_TIMEOUT_MSEC  5000\n+\n+#define MAGIC_BUF_SIZE  64\n+#define ERR_CHECK_LIMIT  64\n+\n+#ifndef MIN\n+#define MIN(a, b) ((a) < (b) ? (a) : (b))\n+#endif\n+\n+#ifndef ARRAY_SIZE\n+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))\n+#endif\n+\n+typedef enum {\n+\tHOST_TO_FPGA = 0,\n+\tFPGA_TO_HOST,\n+\tFPGA_TO_FPGA,\n+\tFPGA_MAX_TRANSFER_TYPE,\n+} fpga_dma_type;\n+\n+typedef union {\n+\tuint32_t csr;\n+\tstruct {\n+\t\tuint32_t tx_channel:8;\n+\t\tuint32_t generate_sop:1;\n+\t\tuint32_t generate_eop:1;\n+\t\tuint32_t park_reads:1;\n+\t\tuint32_t park_writes:1;\n+\t\tuint32_t end_on_eop:1;\n+\t\tuint32_t reserved_1:1;\n+\t\tuint32_t transfer_irq_en:1;\n+\t\tuint32_t early_term_irq_en:1;\n+\t\tuint32_t trans_error_irq_en:8;\n+\t\tuint32_t early_done_en:1;\n+\t\tuint32_t reserved_2:6;\n+\t\tuint32_t go:1;\n+\t};\n+} msgdma_desc_ctrl;\n+\n+typedef struct __rte_packed {\n+\tuint32_t rd_address;\n+\tuint32_t wr_address;\n+\tuint32_t len;\n+\tuint16_t seq_num;\n+\tuint8_t rd_burst_count;\n+\tuint8_t wr_burst_count;\n+\tuint16_t rd_stride;\n+\tuint16_t wr_stride;\n+\tuint32_t rd_address_ext;\n+\tuint32_t wr_address_ext;\n+\tmsgdma_desc_ctrl control;\n+} msgdma_ext_desc;\n+\n+typedef union {\n+\tuint32_t csr;\n+\tstruct {\n+\t\tuint32_t busy:1;\n+\t\tuint32_t desc_buf_empty:1;\n+\t\tuint32_t desc_buf_full:1;\n+\t\tuint32_t rsp_buf_empty:1;\n+\t\tuint32_t rsp_buf_full:1;\n+\t\tuint32_t stopped:1;\n+\t\tuint32_t resetting:1;\n+\t\tuint32_t stopped_on_error:1;\n+\t\tuint32_t stopped_on_early_term:1;\n+\t\tuint32_t irq:1;\n+\t\tuint32_t reserved:22;\n+\t};\n+} msgdma_status;\n+\n+typedef union {\n+\tuint32_t csr;\n+\tstruct {\n+\t\tuint32_t stop_dispatcher:1;\n+\t\tuint32_t reset_dispatcher:1;\n+\t\tuint32_t stop_on_error:1;\n+\t\tuint32_t stopped_on_early_term:1;\n+\t\tuint32_t global_intr_en_mask:1;\n+\t\tuint32_t stop_descriptors:1;\n+\t\tuint32_t reserved:22;\n+\t};\n+} msgdma_ctrl;\n+\n+typedef union {\n+\tuint32_t csr;\n+\tstruct {\n+\t\tuint32_t rd_fill_level:16;\n+\t\tuint32_t wr_fill_level:16;\n+\t};\n+} msgdma_fill_level;\n+\n+typedef union {\n+\tuint32_t csr;\n+\tstruct {\n+\t\tuint32_t rsp_fill_level:16;\n+\t\tuint32_t reserved:16;\n+\t};\n+} msgdma_rsp_level;\n+\n+typedef union {\n+\tuint32_t csr;\n+\tstruct {\n+\t\tuint32_t rd_seq_num:16;\n+\t\tuint32_t wr_seq_num:16;\n+\t};\n+} msgdma_seq_num;\n+\n+typedef struct __rte_packed {\n+\tmsgdma_status status;\n+\tmsgdma_ctrl 
ctrl;\n+\tmsgdma_fill_level fill_level;\n+\tmsgdma_rsp_level rsp;\n+\tmsgdma_seq_num seq_num;\n+} msgdma_csr;\n+\n+#define CSR_STATUS(csr)   (&(((msgdma_csr *)(csr))->status))\n+#define CSR_CONTROL(csr)  (&(((msgdma_csr *)(csr))->ctrl))\n+\n+struct nlb_afu_ctx {\n+\tuint8_t *addr;\n+\tuint8_t *dsm_ptr;\n+\tuint64_t dsm_iova;\n+\tuint8_t *src_ptr;\n+\tuint64_t src_iova;\n+\tuint8_t *dest_ptr;\n+\tuint64_t dest_iova;\n+\tstruct nlb_dsm_status *status_ptr;\n+};\n+\n+struct dma_afu_ctx {\n+\tint index;\n+\tuint8_t *addr;\n+\tuint8_t *csr_addr;\n+\tuint8_t *desc_addr;\n+\tuint8_t *ase_ctrl_addr;\n+\tuint8_t *ase_data_addr;\n+\tuint64_t mem_size;\n+\tuint64_t cur_ase_page;\n+\tint event_fd;\n+\tint verbose;\n+\tint pattern;\n+\tvoid *data_buf;\n+\tvoid *ref_buf;\n+\tmsgdma_ext_desc *desc_buf;\n+\tuint64_t *magic_buf;\n+\tuint64_t magic_iova;\n+\tuint32_t dma_buf_size;\n+\tuint64_t *dma_buf[NUM_DMA_BUF];\n+\tuint64_t dma_iova[NUM_DMA_BUF];\n+};\n+\n+struct n3000_afu_priv {\n+\tstruct rte_pmd_afu_nlb_cfg nlb_cfg;\n+\tstruct rte_pmd_afu_dma_cfg dma_cfg;\n+\tstruct nlb_afu_ctx nlb_ctx;\n+\tstruct dma_afu_ctx dma_ctx[NUM_N3000_DMA];\n+\tint num_dma;\n+\tint cfg_type;\n+};\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _AFU_PMD_N3000_H_ */\ndiff --git a/drivers/raw/ifpga/meson.build b/drivers/raw/ifpga/meson.build\nindex d9a6f29..2294ab5 100644\n--- a/drivers/raw/ifpga/meson.build\n+++ b/drivers/raw/ifpga/meson.build\n@@ -13,7 +13,8 @@ objs = [base_objs]\n deps += ['ethdev', 'rawdev', 'pci', 'bus_pci', 'kvargs',\n     'bus_vdev', 'bus_ifpga', 'net', 'net_i40e', 'net_ipn3ke']\n \n-sources = files('ifpga_rawdev.c', 'rte_pmd_ifpga.c', 'afu_pmd_core.c')\n+sources = files('ifpga_rawdev.c', 'rte_pmd_ifpga.c', 'afu_pmd_core.c',\n+    'afu_pmd_n3000.c')\n \n includes += include_directories('base')\n includes += include_directories('../../net/ipn3ke')\ndiff --git a/drivers/raw/ifpga/rte_pmd_afu.h b/drivers/raw/ifpga/rte_pmd_afu.h\nnew file mode 100644\nindex 0000000..f14a053\n--- /dev/null\n+++ b/drivers/raw/ifpga/rte_pmd_afu.h\n@@ -0,0 +1,97 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2022 Intel Corporation\n+ */\n+\n+#ifndef __RTE_PMD_AFU_H__\n+#define __RTE_PMD_AFU_H__\n+\n+/**\n+ * @file rte_pmd_afu.h\n+ *\n+ * AFU PMD specific definitions.\n+ *\n+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice\n+ *\n+ */\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+#include <stdint.h>\n+\n+#define RTE_PMD_AFU_N3000_NLB   1\n+#define RTE_PMD_AFU_N3000_DMA   2\n+\n+#define NLB_MODE_LPBK      0\n+#define NLB_MODE_READ      1\n+#define NLB_MODE_WRITE     2\n+#define NLB_MODE_TRPUT     3\n+\n+#define NLB_VC_AUTO        0\n+#define NLB_VC_VL0         1\n+#define NLB_VC_VH0         2\n+#define NLB_VC_VH1         3\n+#define NLB_VC_RANDOM      4\n+\n+#define NLB_WRLINE_M       0\n+#define NLB_WRLINE_I       1\n+#define NLB_WRPUSH_I       2\n+\n+#define NLB_RDLINE_S       0\n+#define NLB_RDLINE_I       1\n+#define NLB_RDLINE_MIXED   2\n+\n+#define MIN_CACHE_LINES   1\n+#define MAX_CACHE_LINES   1024\n+\n+#define MIN_DMA_BUF_SIZE  64\n+#define MAX_DMA_BUF_SIZE  (1023 * 1024)\n+\n+/**\n+ * NLB AFU configuration data structure.\n+ */\n+struct rte_pmd_afu_nlb_cfg {\n+\tuint32_t mode;\n+\tuint32_t begin;\n+\tuint32_t end;\n+\tuint32_t multi_cl;\n+\tuint32_t cont;\n+\tuint32_t timeout;\n+\tuint32_t cache_policy;\n+\tuint32_t cache_hint;\n+\tuint32_t read_vc;\n+\tuint32_t write_vc;\n+\tuint32_t wrfence_vc;\n+\tuint32_t freq_mhz;\n+};\n+\n+/**\n+ * DMA AFU configuration data 
structure.\n+ */\n+struct rte_pmd_afu_dma_cfg {\n+\tuint32_t index;     /* index of DMA controller */\n+\tuint32_t length;    /* total length of data to DMA */\n+\tuint32_t offset;    /* address offset of target memory */\n+\tuint32_t size;      /* size of transfer buffer */\n+\tuint32_t pattern;   /* data pattern to fill in test buffer */\n+\tuint32_t unaligned; /* use unaligned address or length in sweep test */\n+\tuint32_t verbose;   /* enable verbose error information in test */\n+};\n+\n+/**\n+ * N3000 AFU configuration data structure.\n+ */\n+struct rte_pmd_afu_n3000_cfg {\n+\tint type;   /* RTE_PMD_AFU_N3000_NLB or RTE_PMD_AFU_N3000_DMA */\n+\tunion {\n+\t\tstruct rte_pmd_afu_nlb_cfg nlb_cfg;\n+\t\tstruct rte_pmd_afu_dma_cfg dma_cfg;\n+\t};\n+};\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* __RTE_PMD_AFU_H__ */\n",
    "prefixes": [
        "v8",
        "2/5"
    ]
}