get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.
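
For example, the patch record shown below can be fetched with a few lines of Python — a minimal sketch using the third-party requests library. The ?format=api suffix only selects the browsable HTML renderer; without it the API typically returns plain JSON:

import requests

# Fetch a single patch from the Patchwork REST API.
# Patch ID 92161 matches the example response shown below.
resp = requests.get("http://patches.dpdk.org/api/patches/92161/")
resp.raise_for_status()

patch = resp.json()
print(patch["name"])   # "[v2,09/12] raw/ioat: move idxd functions to separate file"
print(patch["state"])  # "superseded"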

GET /api/patches/92161/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 92161,
    "url": "http://patches.dpdk.org/api/patches/92161/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210426095259.225354-10-bruce.richardson@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210426095259.225354-10-bruce.richardson@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210426095259.225354-10-bruce.richardson@intel.com",
    "date": "2021-04-26T09:52:56",
    "name": "[v2,09/12] raw/ioat: move idxd functions to separate file",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "f31c37ecc6a611390316241038482de04a24c32e",
    "submitter": {
        "id": 20,
        "url": "http://patches.dpdk.org/api/people/20/?format=api",
        "name": "Bruce Richardson",
        "email": "bruce.richardson@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210426095259.225354-10-bruce.richardson@intel.com/mbox/",
    "series": [
        {
            "id": 16669,
            "url": "http://patches.dpdk.org/api/series/16669/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=16669",
            "date": "2021-04-26T09:52:48",
            "name": "ioat driver updates",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/16669/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/92161/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/92161/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C6A1EA0548;\n\tMon, 26 Apr 2021 11:54:11 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 0C623411D6;\n\tMon, 26 Apr 2021 11:53:30 +0200 (CEST)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by mails.dpdk.org (Postfix) with ESMTP id 54821411BF\n for <dev@dpdk.org>; Mon, 26 Apr 2021 11:53:26 +0200 (CEST)",
            "from orsmga008.jf.intel.com ([10.7.209.65])\n by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 26 Apr 2021 02:53:21 -0700",
            "from silpixa00399126.ir.intel.com ([10.237.223.81])\n by orsmga008.jf.intel.com with ESMTP; 26 Apr 2021 02:53:20 -0700"
        ],
        "IronPort-SDR": [
            "\n hcVdG781aXVTmtCJJsZ21SDxEUnKRKBC8tgdIxiOUtwze1YhdTxydUKgKidoW7pX4XryYKyyOh\n XlrGlcCErPmw==",
            "\n ATalc/ylTmbMwtD+r/F9tx0QGNEdp0ctvgdMMBgaUPZ12UGaIGtqg58DR3uJ8fEDsDw8+NGiqO\n TCXFXt3cnIDQ=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,9965\"; a=\"183442911\"",
            "E=Sophos;i=\"5.82,252,1613462400\"; d=\"scan'208\";a=\"183442911\"",
            "E=Sophos;i=\"5.82,252,1613462400\"; d=\"scan'208\";a=\"429336944\""
        ],
        "X-ExtLoop1": "1",
        "From": "Bruce Richardson <bruce.richardson@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "kevin.laatz@intel.com, jiayu.hu@intel.com,\n Bruce Richardson <bruce.richardson@intel.com>",
        "Date": "Mon, 26 Apr 2021 10:52:56 +0100",
        "Message-Id": "<20210426095259.225354-10-bruce.richardson@intel.com>",
        "X-Mailer": "git-send-email 2.30.2",
        "In-Reply-To": "<20210426095259.225354-1-bruce.richardson@intel.com>",
        "References": "<20210318182042.43658-2-bruce.richardson@intel.com>\n <20210426095259.225354-1-bruce.richardson@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v2 09/12] raw/ioat: move idxd functions to\n separate file",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Split the rte_ioat_rawdev_fns.h file into two separate headers, so that the\ndata structures for the original ioat devices and the newer idxd ones can\nbe kept separate from each other. This makes code management and rework\neasier.\n\nSigned-off-by: Bruce Richardson <bruce.richardson@intel.com>\n---\n drivers/raw/ioat/meson.build           |   1 +\n drivers/raw/ioat/rte_idxd_rawdev_fns.h | 275 ++++++++++++++++++++++\n drivers/raw/ioat/rte_ioat_rawdev_fns.h | 310 ++-----------------------\n 3 files changed, 299 insertions(+), 287 deletions(-)\n create mode 100644 drivers/raw/ioat/rte_idxd_rawdev_fns.h",
    "diff": "diff --git a/drivers/raw/ioat/meson.build b/drivers/raw/ioat/meson.build\nindex 6382a826e7..0e81cb5951 100644\n--- a/drivers/raw/ioat/meson.build\n+++ b/drivers/raw/ioat/meson.build\n@@ -13,5 +13,6 @@ sources = files(\n deps += ['bus_pci', 'mbuf', 'rawdev']\n headers = files(\n         'rte_ioat_rawdev.h',\n+        'rte_idxd_rawdev_fns.h',\n         'rte_ioat_rawdev_fns.h',\n )\ndiff --git a/drivers/raw/ioat/rte_idxd_rawdev_fns.h b/drivers/raw/ioat/rte_idxd_rawdev_fns.h\nnew file mode 100644\nindex 0000000000..c2a12ebef0\n--- /dev/null\n+++ b/drivers/raw/ioat/rte_idxd_rawdev_fns.h\n@@ -0,0 +1,275 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2021 Intel Corporation\n+ */\n+#ifndef _RTE_IDXD_RAWDEV_FNS_H_\n+#define _RTE_IDXD_RAWDEV_FNS_H_\n+\n+#include <stdint.h>\n+\n+/*\n+ * Defines used in the data path for interacting with hardware.\n+ */\n+#define IDXD_CMD_OP_SHIFT 24\n+enum rte_idxd_ops {\n+\tidxd_op_nop = 0,\n+\tidxd_op_batch,\n+\tidxd_op_drain,\n+\tidxd_op_memmove,\n+\tidxd_op_fill\n+};\n+\n+#define IDXD_FLAG_FENCE                 (1 << 0)\n+#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)\n+#define IDXD_FLAG_REQUEST_COMPLETION    (1 << 3)\n+#define IDXD_FLAG_CACHE_CONTROL         (1 << 8)\n+\n+#define IOAT_COMP_UPDATE_SHIFT\t3\n+#define IOAT_CMD_OP_SHIFT\t24\n+enum rte_ioat_ops {\n+\tioat_op_copy = 0,\t/* Standard DMA Operation */\n+\tioat_op_fill\t\t/* Block Fill */\n+};\n+\n+/**\n+ * Hardware descriptor used by DSA hardware, for both bursts and\n+ * for individual operations.\n+ */\n+struct rte_idxd_hw_desc {\n+\tuint32_t pasid;\n+\tuint32_t op_flags;\n+\trte_iova_t completion;\n+\n+\tRTE_STD_C11\n+\tunion {\n+\t\trte_iova_t src;      /* source address for copy ops etc. */\n+\t\trte_iova_t desc_addr; /* descriptor pointer for batch */\n+\t};\n+\trte_iova_t dst;\n+\n+\tuint32_t size;    /* length of data for op, or batch size */\n+\n+\tuint16_t intr_handle; /* completion interrupt handle */\n+\n+\t/* remaining 26 bytes are reserved */\n+\tuint16_t __reserved[13];\n+} __rte_aligned(64);\n+\n+/**\n+ * Completion record structure written back by DSA\n+ */\n+struct rte_idxd_completion {\n+\tuint8_t status;\n+\tuint8_t result;\n+\t/* 16-bits pad here */\n+\tuint32_t completed_size; /* data length, or descriptors for batch */\n+\n+\trte_iova_t fault_address;\n+\tuint32_t invalid_flags;\n+} __rte_aligned(32);\n+\n+#define BATCH_SIZE 64\n+\n+/**\n+ * Structure used inside the driver for building up and submitting\n+ * a batch of operations to the DSA hardware.\n+ */\n+struct rte_idxd_desc_batch {\n+\tstruct rte_idxd_completion comp; /* the completion record for batch */\n+\n+\tuint16_t submitted;\n+\tuint16_t op_count;\n+\tuint16_t hdl_end;\n+\n+\tstruct rte_idxd_hw_desc batch_desc;\n+\n+\t/* batches must always have 2 descriptors, so put a null at the start */\n+\tstruct rte_idxd_hw_desc null_desc;\n+\tstruct rte_idxd_hw_desc ops[BATCH_SIZE];\n+};\n+\n+/**\n+ * structure used to save the \"handles\" provided by the user to be\n+ * returned to the user on job completion.\n+ */\n+struct rte_idxd_user_hdl {\n+\tuint64_t src;\n+\tuint64_t dst;\n+};\n+\n+/**\n+ * @internal\n+ * Structure representing an IDXD device instance\n+ */\n+struct rte_idxd_rawdev {\n+\tenum rte_ioat_dev_type type;\n+\tstruct rte_ioat_xstats xstats;\n+\n+\tvoid *portal; /* address to write the batch descriptor */\n+\n+\t/* counters to track the batches and the individual op handles */\n+\tuint16_t batch_ring_sz;  /* size of batch ring */\n+\tuint16_t hdl_ring_sz;    /* size of the user hdl 
ring */\n+\n+\tuint16_t next_batch;     /* where we write descriptor ops */\n+\tuint16_t next_completed; /* batch where we read completions */\n+\tuint16_t next_ret_hdl;   /* the next user hdl to return */\n+\tuint16_t last_completed_hdl; /* the last user hdl that has completed */\n+\tuint16_t next_free_hdl;  /* where the handle for next op will go */\n+\tuint16_t hdls_disable;   /* disable tracking completion handles */\n+\n+\tstruct rte_idxd_user_hdl *hdl_ring;\n+\tstruct rte_idxd_desc_batch *batch_ring;\n+};\n+\n+static __rte_always_inline int\n+__idxd_write_desc(int dev_id, const struct rte_idxd_hw_desc *desc,\n+\t\tconst struct rte_idxd_user_hdl *hdl)\n+{\n+\tstruct rte_idxd_rawdev *idxd =\n+\t\t\t(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;\n+\tstruct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];\n+\n+\t/* check for room in the handle ring */\n+\tif (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl)\n+\t\tgoto failed;\n+\n+\t/* check for space in current batch */\n+\tif (b->op_count >= BATCH_SIZE)\n+\t\tgoto failed;\n+\n+\t/* check that we can actually use the current batch */\n+\tif (b->submitted)\n+\t\tgoto failed;\n+\n+\t/* write the descriptor */\n+\tb->ops[b->op_count++] = *desc;\n+\n+\t/* store the completion details */\n+\tif (!idxd->hdls_disable)\n+\t\tidxd->hdl_ring[idxd->next_free_hdl] = *hdl;\n+\tif (++idxd->next_free_hdl == idxd->hdl_ring_sz)\n+\t\tidxd->next_free_hdl = 0;\n+\n+\tidxd->xstats.enqueued++;\n+\treturn 1;\n+\n+failed:\n+\tidxd->xstats.enqueue_failed++;\n+\trte_errno = ENOSPC;\n+\treturn 0;\n+}\n+\n+static __rte_always_inline int\n+__idxd_enqueue_fill(int dev_id, uint64_t pattern, rte_iova_t dst,\n+\t\tunsigned int length, uintptr_t dst_hdl)\n+{\n+\tconst struct rte_idxd_hw_desc desc = {\n+\t\t\t.op_flags =  (idxd_op_fill << IDXD_CMD_OP_SHIFT) |\n+\t\t\t\tIDXD_FLAG_CACHE_CONTROL,\n+\t\t\t.src = pattern,\n+\t\t\t.dst = dst,\n+\t\t\t.size = length\n+\t};\n+\tconst struct rte_idxd_user_hdl hdl = {\n+\t\t\t.dst = dst_hdl\n+\t};\n+\treturn __idxd_write_desc(dev_id, &desc, &hdl);\n+}\n+\n+static __rte_always_inline int\n+__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,\n+\t\tunsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)\n+{\n+\tconst struct rte_idxd_hw_desc desc = {\n+\t\t\t.op_flags =  (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |\n+\t\t\t\tIDXD_FLAG_CACHE_CONTROL,\n+\t\t\t.src = src,\n+\t\t\t.dst = dst,\n+\t\t\t.size = length\n+\t};\n+\tconst struct rte_idxd_user_hdl hdl = {\n+\t\t\t.src = src_hdl,\n+\t\t\t.dst = dst_hdl\n+\t};\n+\treturn __idxd_write_desc(dev_id, &desc, &hdl);\n+}\n+\n+static __rte_always_inline int\n+__idxd_fence(int dev_id)\n+{\n+\tstatic const struct rte_idxd_hw_desc fence = {\n+\t\t\t.op_flags = IDXD_FLAG_FENCE\n+\t};\n+\tstatic const struct rte_idxd_user_hdl null_hdl;\n+\treturn __idxd_write_desc(dev_id, &fence, &null_hdl);\n+}\n+\n+static __rte_always_inline void\n+__idxd_movdir64b(volatile void *dst, const void *src)\n+{\n+\tasm volatile (\".byte 0x66, 0x0f, 0x38, 0xf8, 0x02\"\n+\t\t\t:\n+\t\t\t: \"a\" (dst), \"d\" (src));\n+}\n+\n+static __rte_always_inline int\n+__idxd_perform_ops(int dev_id)\n+{\n+\tstruct rte_idxd_rawdev *idxd =\n+\t\t\t(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;\n+\tstruct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];\n+\n+\tif (b->submitted || b->op_count == 0)\n+\t\treturn 0;\n+\tb->hdl_end = idxd->next_free_hdl;\n+\tb->comp.status = 0;\n+\tb->submitted = 1;\n+\tb->batch_desc.size = b->op_count + 
1;\n+\t__idxd_movdir64b(idxd->portal, &b->batch_desc);\n+\n+\tif (++idxd->next_batch == idxd->batch_ring_sz)\n+\t\tidxd->next_batch = 0;\n+\tidxd->xstats.started = idxd->xstats.enqueued;\n+\treturn 0;\n+}\n+\n+static __rte_always_inline int\n+__idxd_completed_ops(int dev_id, uint8_t max_ops,\n+\t\tuintptr_t *src_hdls, uintptr_t *dst_hdls)\n+{\n+\tstruct rte_idxd_rawdev *idxd =\n+\t\t\t(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;\n+\tstruct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_completed];\n+\tuint16_t h_idx = idxd->next_ret_hdl;\n+\tint n = 0;\n+\n+\twhile (b->submitted && b->comp.status != 0) {\n+\t\tidxd->last_completed_hdl = b->hdl_end;\n+\t\tb->submitted = 0;\n+\t\tb->op_count = 0;\n+\t\tif (++idxd->next_completed == idxd->batch_ring_sz)\n+\t\t\tidxd->next_completed = 0;\n+\t\tb = &idxd->batch_ring[idxd->next_completed];\n+\t}\n+\n+\tif (!idxd->hdls_disable)\n+\t\tfor (n = 0; n < max_ops && h_idx != idxd->last_completed_hdl; n++) {\n+\t\t\tsrc_hdls[n] = idxd->hdl_ring[h_idx].src;\n+\t\t\tdst_hdls[n] = idxd->hdl_ring[h_idx].dst;\n+\t\t\tif (++h_idx == idxd->hdl_ring_sz)\n+\t\t\t\th_idx = 0;\n+\t\t}\n+\telse\n+\t\twhile (h_idx != idxd->last_completed_hdl) {\n+\t\t\tn++;\n+\t\t\tif (++h_idx == idxd->hdl_ring_sz)\n+\t\t\t\th_idx = 0;\n+\t\t}\n+\n+\tidxd->next_ret_hdl = h_idx;\n+\n+\tidxd->xstats.completed += n;\n+\treturn n;\n+}\n+\n+#endif\ndiff --git a/drivers/raw/ioat/rte_ioat_rawdev_fns.h b/drivers/raw/ioat/rte_ioat_rawdev_fns.h\nindex 477c1b7b41..598852b1fa 100644\n--- a/drivers/raw/ioat/rte_ioat_rawdev_fns.h\n+++ b/drivers/raw/ioat/rte_ioat_rawdev_fns.h\n@@ -9,6 +9,29 @@\n #include <rte_memzone.h>\n #include <rte_prefetch.h>\n \n+/**\n+ * @internal\n+ * Identify the data path to use.\n+ * Must be first field of rte_ioat_rawdev and rte_idxd_rawdev structs\n+ */\n+enum rte_ioat_dev_type {\n+\tRTE_IOAT_DEV,\n+\tRTE_IDXD_DEV,\n+};\n+\n+/**\n+ * @internal\n+ * some statistics for tracking, if added/changed update xstats fns\n+ */\n+struct rte_ioat_xstats {\n+\tuint64_t enqueue_failed;\n+\tuint64_t enqueued;\n+\tuint64_t started;\n+\tuint64_t completed;\n+};\n+\n+#include \"rte_idxd_rawdev_fns.h\"\n+\n /**\n  * @internal\n  * Structure representing a device descriptor\n@@ -39,27 +62,6 @@ struct rte_ioat_generic_hw_desc {\n \tuint64_t op_specific[4];\n };\n \n-/**\n- * @internal\n- * Identify the data path to use.\n- * Must be first field of rte_ioat_rawdev and rte_idxd_rawdev structs\n- */\n-enum rte_ioat_dev_type {\n-\tRTE_IOAT_DEV,\n-\tRTE_IDXD_DEV,\n-};\n-\n-/**\n- * @internal\n- * some statistics for tracking, if added/changed update xstats fns\n- */\n-struct rte_ioat_xstats {\n-\tuint64_t enqueue_failed;\n-\tuint64_t enqueued;\n-\tuint64_t started;\n-\tuint64_t completed;\n-};\n-\n /**\n  * @internal\n  * Structure representing an IOAT device instance\n@@ -98,121 +100,6 @@ struct rte_ioat_rawdev {\n #define RTE_IOAT_CHANSTS_HALTED\t\t\t0x3\n #define RTE_IOAT_CHANSTS_ARMED\t\t\t0x4\n \n-/*\n- * Defines used in the data path for interacting with hardware.\n- */\n-#define IDXD_CMD_OP_SHIFT 24\n-enum rte_idxd_ops {\n-\tidxd_op_nop = 0,\n-\tidxd_op_batch,\n-\tidxd_op_drain,\n-\tidxd_op_memmove,\n-\tidxd_op_fill\n-};\n-\n-#define IDXD_FLAG_FENCE                 (1 << 0)\n-#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)\n-#define IDXD_FLAG_REQUEST_COMPLETION    (1 << 3)\n-#define IDXD_FLAG_CACHE_CONTROL         (1 << 8)\n-\n-#define IOAT_COMP_UPDATE_SHIFT\t3\n-#define IOAT_CMD_OP_SHIFT\t24\n-enum rte_ioat_ops {\n-\tioat_op_copy = 0,\t/* Standard DMA Operation 
*/\n-\tioat_op_fill\t\t/* Block Fill */\n-};\n-\n-/**\n- * Hardware descriptor used by DSA hardware, for both bursts and\n- * for individual operations.\n- */\n-struct rte_idxd_hw_desc {\n-\tuint32_t pasid;\n-\tuint32_t op_flags;\n-\trte_iova_t completion;\n-\n-\tRTE_STD_C11\n-\tunion {\n-\t\trte_iova_t src;      /* source address for copy ops etc. */\n-\t\trte_iova_t desc_addr; /* descriptor pointer for batch */\n-\t};\n-\trte_iova_t dst;\n-\n-\tuint32_t size;    /* length of data for op, or batch size */\n-\n-\tuint16_t intr_handle; /* completion interrupt handle */\n-\n-\t/* remaining 26 bytes are reserved */\n-\tuint16_t __reserved[13];\n-} __rte_aligned(64);\n-\n-/**\n- * Completion record structure written back by DSA\n- */\n-struct rte_idxd_completion {\n-\tuint8_t status;\n-\tuint8_t result;\n-\t/* 16-bits pad here */\n-\tuint32_t completed_size; /* data length, or descriptors for batch */\n-\n-\trte_iova_t fault_address;\n-\tuint32_t invalid_flags;\n-} __rte_aligned(32);\n-\n-#define BATCH_SIZE 64\n-\n-/**\n- * Structure used inside the driver for building up and submitting\n- * a batch of operations to the DSA hardware.\n- */\n-struct rte_idxd_desc_batch {\n-\tstruct rte_idxd_completion comp; /* the completion record for batch */\n-\n-\tuint16_t submitted;\n-\tuint16_t op_count;\n-\tuint16_t hdl_end;\n-\n-\tstruct rte_idxd_hw_desc batch_desc;\n-\n-\t/* batches must always have 2 descriptors, so put a null at the start */\n-\tstruct rte_idxd_hw_desc null_desc;\n-\tstruct rte_idxd_hw_desc ops[BATCH_SIZE];\n-};\n-\n-/**\n- * structure used to save the \"handles\" provided by the user to be\n- * returned to the user on job completion.\n- */\n-struct rte_idxd_user_hdl {\n-\tuint64_t src;\n-\tuint64_t dst;\n-};\n-\n-/**\n- * @internal\n- * Structure representing an IDXD device instance\n- */\n-struct rte_idxd_rawdev {\n-\tenum rte_ioat_dev_type type;\n-\tstruct rte_ioat_xstats xstats;\n-\n-\tvoid *portal; /* address to write the batch descriptor */\n-\n-\t/* counters to track the batches and the individual op handles */\n-\tuint16_t batch_ring_sz;  /* size of batch ring */\n-\tuint16_t hdl_ring_sz;    /* size of the user hdl ring */\n-\n-\tuint16_t next_batch;     /* where we write descriptor ops */\n-\tuint16_t next_completed; /* batch where we read completions */\n-\tuint16_t next_ret_hdl;   /* the next user hdl to return */\n-\tuint16_t last_completed_hdl; /* the last user hdl that has completed */\n-\tuint16_t next_free_hdl;  /* where the handle for next op will go */\n-\tuint16_t hdls_disable;   /* disable tracking completion handles */\n-\n-\tstruct rte_idxd_user_hdl *hdl_ring;\n-\tstruct rte_idxd_desc_batch *batch_ring;\n-};\n-\n static __rte_always_inline int\n __ioat_write_desc(int dev_id, uint32_t op, uint64_t src, phys_addr_t dst,\n \t\tunsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)\n@@ -373,157 +260,6 @@ __ioat_completed_ops(int dev_id, uint8_t max_copies,\n \treturn count;\n }\n \n-static __rte_always_inline int\n-__idxd_write_desc(int dev_id, const struct rte_idxd_hw_desc *desc,\n-\t\tconst struct rte_idxd_user_hdl *hdl)\n-{\n-\tstruct rte_idxd_rawdev *idxd =\n-\t\t\t(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;\n-\tstruct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];\n-\n-\t/* check for room in the handle ring */\n-\tif (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl)\n-\t\tgoto failed;\n-\n-\t/* check for space in current batch */\n-\tif (b->op_count >= BATCH_SIZE)\n-\t\tgoto failed;\n-\n-\t/* check 
that we can actually use the current batch */\n-\tif (b->submitted)\n-\t\tgoto failed;\n-\n-\t/* write the descriptor */\n-\tb->ops[b->op_count++] = *desc;\n-\n-\t/* store the completion details */\n-\tif (!idxd->hdls_disable)\n-\t\tidxd->hdl_ring[idxd->next_free_hdl] = *hdl;\n-\tif (++idxd->next_free_hdl == idxd->hdl_ring_sz)\n-\t\tidxd->next_free_hdl = 0;\n-\n-\tidxd->xstats.enqueued++;\n-\treturn 1;\n-\n-failed:\n-\tidxd->xstats.enqueue_failed++;\n-\trte_errno = ENOSPC;\n-\treturn 0;\n-}\n-\n-static __rte_always_inline int\n-__idxd_enqueue_fill(int dev_id, uint64_t pattern, rte_iova_t dst,\n-\t\tunsigned int length, uintptr_t dst_hdl)\n-{\n-\tconst struct rte_idxd_hw_desc desc = {\n-\t\t\t.op_flags =  (idxd_op_fill << IDXD_CMD_OP_SHIFT) |\n-\t\t\t\tIDXD_FLAG_CACHE_CONTROL,\n-\t\t\t.src = pattern,\n-\t\t\t.dst = dst,\n-\t\t\t.size = length\n-\t};\n-\tconst struct rte_idxd_user_hdl hdl = {\n-\t\t\t.dst = dst_hdl\n-\t};\n-\treturn __idxd_write_desc(dev_id, &desc, &hdl);\n-}\n-\n-static __rte_always_inline int\n-__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,\n-\t\tunsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)\n-{\n-\tconst struct rte_idxd_hw_desc desc = {\n-\t\t\t.op_flags =  (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |\n-\t\t\t\tIDXD_FLAG_CACHE_CONTROL,\n-\t\t\t.src = src,\n-\t\t\t.dst = dst,\n-\t\t\t.size = length\n-\t};\n-\tconst struct rte_idxd_user_hdl hdl = {\n-\t\t\t.src = src_hdl,\n-\t\t\t.dst = dst_hdl\n-\t};\n-\treturn __idxd_write_desc(dev_id, &desc, &hdl);\n-}\n-\n-static __rte_always_inline int\n-__idxd_fence(int dev_id)\n-{\n-\tstatic const struct rte_idxd_hw_desc fence = {\n-\t\t\t.op_flags = IDXD_FLAG_FENCE\n-\t};\n-\tstatic const struct rte_idxd_user_hdl null_hdl;\n-\treturn __idxd_write_desc(dev_id, &fence, &null_hdl);\n-}\n-\n-static __rte_always_inline void\n-__idxd_movdir64b(volatile void *dst, const void *src)\n-{\n-\tasm volatile (\".byte 0x66, 0x0f, 0x38, 0xf8, 0x02\"\n-\t\t\t:\n-\t\t\t: \"a\" (dst), \"d\" (src));\n-}\n-\n-static __rte_always_inline int\n-__idxd_perform_ops(int dev_id)\n-{\n-\tstruct rte_idxd_rawdev *idxd =\n-\t\t\t(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;\n-\tstruct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];\n-\n-\tif (b->submitted || b->op_count == 0)\n-\t\treturn 0;\n-\tb->hdl_end = idxd->next_free_hdl;\n-\tb->comp.status = 0;\n-\tb->submitted = 1;\n-\tb->batch_desc.size = b->op_count + 1;\n-\t__idxd_movdir64b(idxd->portal, &b->batch_desc);\n-\n-\tif (++idxd->next_batch == idxd->batch_ring_sz)\n-\t\tidxd->next_batch = 0;\n-\tidxd->xstats.started = idxd->xstats.enqueued;\n-\treturn 0;\n-}\n-\n-static __rte_always_inline int\n-__idxd_completed_ops(int dev_id, uint8_t max_ops,\n-\t\tuintptr_t *src_hdls, uintptr_t *dst_hdls)\n-{\n-\tstruct rte_idxd_rawdev *idxd =\n-\t\t\t(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;\n-\tstruct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_completed];\n-\tuint16_t h_idx = idxd->next_ret_hdl;\n-\tint n = 0;\n-\n-\twhile (b->submitted && b->comp.status != 0) {\n-\t\tidxd->last_completed_hdl = b->hdl_end;\n-\t\tb->submitted = 0;\n-\t\tb->op_count = 0;\n-\t\tif (++idxd->next_completed == idxd->batch_ring_sz)\n-\t\t\tidxd->next_completed = 0;\n-\t\tb = &idxd->batch_ring[idxd->next_completed];\n-\t}\n-\n-\tif (!idxd->hdls_disable)\n-\t\tfor (n = 0; n < max_ops && h_idx != idxd->last_completed_hdl; n++) {\n-\t\t\tsrc_hdls[n] = idxd->hdl_ring[h_idx].src;\n-\t\t\tdst_hdls[n] = idxd->hdl_ring[h_idx].dst;\n-\t\t\tif (++h_idx == 
idxd->hdl_ring_sz)\n-\t\t\t\th_idx = 0;\n-\t\t}\n-\telse\n-\t\twhile (h_idx != idxd->last_completed_hdl) {\n-\t\t\tn++;\n-\t\t\tif (++h_idx == idxd->hdl_ring_sz)\n-\t\t\t\th_idx = 0;\n-\t\t}\n-\n-\tidxd->next_ret_hdl = h_idx;\n-\n-\tidxd->xstats.completed += n;\n-\treturn n;\n-}\n-\n static inline int\n rte_ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,\n \t\tunsigned int len, uintptr_t dst_hdl)\n",
    "prefixes": [
        "v2",
        "09/12"
    ]
}
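
The same endpoint accepts updates, as the Allow header above indicates. A minimal sketch, assuming you hold a Patchwork API token with maintainer rights on the project (the token value here is a placeholder, and the set of writable fields may vary by server version). Since PATCH is a partial update, only the fields being changed need to be sent:

import requests

# Partially update the patch; fields such as state, archived and
# delegate are generally writable for project maintainers.
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/92161/",
    headers={"Authorization": "Token YOUR_API_TOKEN"},  # placeholder token
    json={"state": "accepted", "archived": False},
)
resp.raise_for_status()
print(resp.json()["state"])  # "accepted" on success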