get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (replaces the writable fields).

GET /api/patches/80020/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 80020,
    "url": "http://patches.dpdk.org/api/patches/80020/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20201008095133.123014-20-bruce.richardson@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201008095133.123014-20-bruce.richardson@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201008095133.123014-20-bruce.richardson@intel.com",
    "date": "2020-10-08T09:51:27",
    "name": "[v6,19/25] raw/ioat: add data path for idxd devices",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "bcae6e9612e66793ea4e5ef5c80045f73ddcf104",
    "submitter": {
        "id": 20,
        "url": "http://patches.dpdk.org/api/people/20/?format=api",
        "name": "Bruce Richardson",
        "email": "bruce.richardson@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20201008095133.123014-20-bruce.richardson@intel.com/mbox/",
    "series": [
        {
            "id": 12773,
            "url": "http://patches.dpdk.org/api/series/12773/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=12773",
            "date": "2020-10-08T09:51:08",
            "name": "raw/ioat: enhancements and new hardware support",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/12773/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/80020/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/80020/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 17AE9A04BC;\n\tThu,  8 Oct 2020 11:58:17 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id ED5D41BFBC;\n\tThu,  8 Oct 2020 11:52:19 +0200 (CEST)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by dpdk.org (Postfix) with ESMTP id 7F1B71BF94\n for <dev@dpdk.org>; Thu,  8 Oct 2020 11:52:17 +0200 (CEST)",
            "from orsmga008.jf.intel.com ([10.7.209.65])\n by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 08 Oct 2020 02:52:17 -0700",
            "from silpixa00399126.ir.intel.com ([10.237.222.4])\n by orsmga008.jf.intel.com with ESMTP; 08 Oct 2020 02:52:15 -0700"
        ],
        "IronPort-SDR": [
            "\n 842h25ghFsRKTUzdZuddtyWUMUZuJBBm4jYUkQtYk1x9/wZ8R4xhCBWFhYtYbWrrSdS+Su1k7z\n t2zQs+0YU/Fw==",
            "\n 7N5tWopuZU4kKfMKDi8lpnl2wpReMfAK6ac9Kd7kAmw+2AKySQQEV7ucr4zNmuYGK7A3BIqqpJ\n ELAjjNZ43dvQ=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9767\"; a=\"152226402\"",
            "E=Sophos;i=\"5.77,350,1596524400\"; d=\"scan'208\";a=\"152226402\"",
            "E=Sophos;i=\"5.77,350,1596524400\"; d=\"scan'208\";a=\"344686750\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Bruce Richardson <bruce.richardson@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "patrick.fu@intel.com, thomas@monjalon.net,\n Bruce Richardson <bruce.richardson@intel.com>,\n Kevin Laatz <kevin.laatz@intel.com>, Radu Nicolau <radu.nicolau@intel.com>",
        "Date": "Thu,  8 Oct 2020 10:51:27 +0100",
        "Message-Id": "<20201008095133.123014-20-bruce.richardson@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20201008095133.123014-1-bruce.richardson@intel.com>",
        "References": "<20200721095140.719297-1-bruce.richardson@intel.com>\n <20201008095133.123014-1-bruce.richardson@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v6 19/25] raw/ioat: add data path for idxd devices",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add support for doing copies using DSA hardware. This is implemented by\njust switching on the device type field at the start of the inline\nfunctions. Since there is no hardware which will have both device types\npresent this branch will always be predictable after the first call,\nmeaning it has little to no perf penalty.\n\nSigned-off-by: Bruce Richardson <bruce.richardson@intel.com>\nReviewed-by: Kevin Laatz <kevin.laatz@intel.com>\nAcked-by: Radu Nicolau <radu.nicolau@intel.com>\n---\n drivers/raw/ioat/ioat_common.c         |   1 +\n drivers/raw/ioat/ioat_rawdev.c         |   1 +\n drivers/raw/ioat/rte_ioat_rawdev_fns.h | 201 +++++++++++++++++++++++--\n 3 files changed, 192 insertions(+), 11 deletions(-)",
    "diff": "diff --git a/drivers/raw/ioat/ioat_common.c b/drivers/raw/ioat/ioat_common.c\nindex 5173c331c..6a4e2979f 100644\n--- a/drivers/raw/ioat/ioat_common.c\n+++ b/drivers/raw/ioat/ioat_common.c\n@@ -153,6 +153,7 @@ idxd_rawdev_create(const char *name, struct rte_device *dev,\n \n \tidxd = rawdev->dev_private;\n \t*idxd = *base_idxd; /* copy over the main fields already passed in */\n+\tidxd->public.type = RTE_IDXD_DEV;\n \tidxd->rawdev = rawdev;\n \tidxd->mz = mz;\n \ndiff --git a/drivers/raw/ioat/ioat_rawdev.c b/drivers/raw/ioat/ioat_rawdev.c\nindex 1fe32278d..0097be87e 100644\n--- a/drivers/raw/ioat/ioat_rawdev.c\n+++ b/drivers/raw/ioat/ioat_rawdev.c\n@@ -260,6 +260,7 @@ ioat_rawdev_create(const char *name, struct rte_pci_device *dev)\n \trawdev->driver_name = dev->device.driver->name;\n \n \tioat = rawdev->dev_private;\n+\tioat->type = RTE_IOAT_DEV;\n \tioat->rawdev = rawdev;\n \tioat->mz = mz;\n \tioat->regs = dev->mem_resource[0].addr;\ndiff --git a/drivers/raw/ioat/rte_ioat_rawdev_fns.h b/drivers/raw/ioat/rte_ioat_rawdev_fns.h\nindex e9cdce016..36ba876ea 100644\n--- a/drivers/raw/ioat/rte_ioat_rawdev_fns.h\n+++ b/drivers/raw/ioat/rte_ioat_rawdev_fns.h\n@@ -196,8 +196,8 @@ struct rte_idxd_rawdev {\n /*\n  * Enqueue a copy operation onto the ioat device\n  */\n-static inline int\n-rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,\n+static __rte_always_inline int\n+__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,\n \t\tunsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)\n {\n \tstruct rte_ioat_rawdev *ioat =\n@@ -233,8 +233,8 @@ rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,\n }\n \n /* add fence to last written descriptor */\n-static inline int\n-rte_ioat_fence(int dev_id)\n+static __rte_always_inline int\n+__ioat_fence(int dev_id)\n {\n \tstruct rte_ioat_rawdev *ioat =\n \t\t\t(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;\n@@ -252,8 +252,8 @@ rte_ioat_fence(int dev_id)\n /*\n  * 
Trigger hardware to begin performing enqueued operations\n  */\n-static inline void\n-rte_ioat_perform_ops(int dev_id)\n+static __rte_always_inline void\n+__ioat_perform_ops(int dev_id)\n {\n \tstruct rte_ioat_rawdev *ioat =\n \t\t\t(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;\n@@ -268,8 +268,8 @@ rte_ioat_perform_ops(int dev_id)\n  * @internal\n  * Returns the index of the last completed operation.\n  */\n-static inline int\n-rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)\n+static __rte_always_inline int\n+__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)\n {\n \tuint64_t status = ioat->status;\n \n@@ -283,8 +283,8 @@ rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)\n /*\n  * Returns details of operations that have been completed\n  */\n-static inline int\n-rte_ioat_completed_ops(int dev_id, uint8_t max_copies,\n+static __rte_always_inline int\n+__ioat_completed_ops(int dev_id, uint8_t max_copies,\n \t\tuintptr_t *src_hdls, uintptr_t *dst_hdls)\n {\n \tstruct rte_ioat_rawdev *ioat =\n@@ -295,7 +295,7 @@ rte_ioat_completed_ops(int dev_id, uint8_t max_copies,\n \tint error;\n \tint i = 0;\n \n-\tend_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;\n+\tend_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;\n \tcount = (end_read - (read & mask)) & mask;\n \n \tif (error) {\n@@ -332,6 +332,185 @@ rte_ioat_completed_ops(int dev_id, uint8_t max_copies,\n \treturn count;\n }\n \n+static __rte_always_inline int\n+__idxd_write_desc(int dev_id, const struct rte_idxd_hw_desc *desc,\n+\t\tconst struct rte_idxd_user_hdl *hdl)\n+{\n+\tstruct rte_idxd_rawdev *idxd =\n+\t\t\t(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;\n+\tstruct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];\n+\n+\t/* check for room in the handle ring */\n+\tif (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl)\n+\t\tgoto failed;\n+\n+\t/* check for space in 
current batch */\n+\tif (b->op_count >= BATCH_SIZE)\n+\t\tgoto failed;\n+\n+\t/* check that we can actually use the current batch */\n+\tif (b->submitted)\n+\t\tgoto failed;\n+\n+\t/* write the descriptor */\n+\tb->ops[b->op_count++] = *desc;\n+\n+\t/* store the completion details */\n+\tif (!idxd->hdls_disable)\n+\t\tidxd->hdl_ring[idxd->next_free_hdl] = *hdl;\n+\tif (++idxd->next_free_hdl == idxd->hdl_ring_sz)\n+\t\tidxd->next_free_hdl = 0;\n+\n+\treturn 1;\n+\n+failed:\n+\trte_errno = ENOSPC;\n+\treturn 0;\n+}\n+\n+static __rte_always_inline int\n+__idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,\n+\t\tunsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)\n+{\n+\tconst struct rte_idxd_hw_desc desc = {\n+\t\t\t.op_flags =  (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |\n+\t\t\t\tIDXD_FLAG_CACHE_CONTROL,\n+\t\t\t.src = src,\n+\t\t\t.dst = dst,\n+\t\t\t.size = length\n+\t};\n+\tconst struct rte_idxd_user_hdl hdl = {\n+\t\t\t.src = src_hdl,\n+\t\t\t.dst = dst_hdl\n+\t};\n+\treturn __idxd_write_desc(dev_id, &desc, &hdl);\n+}\n+\n+static __rte_always_inline int\n+__idxd_fence(int dev_id)\n+{\n+\tstatic const struct rte_idxd_hw_desc fence = {\n+\t\t\t.op_flags = IDXD_FLAG_FENCE\n+\t};\n+\tstatic const struct rte_idxd_user_hdl null_hdl;\n+\treturn __idxd_write_desc(dev_id, &fence, &null_hdl);\n+}\n+\n+static __rte_always_inline void\n+__idxd_movdir64b(volatile void *dst, const void *src)\n+{\n+\tasm volatile (\".byte 0x66, 0x0f, 0x38, 0xf8, 0x02\"\n+\t\t\t:\n+\t\t\t: \"a\" (dst), \"d\" (src));\n+}\n+\n+static __rte_always_inline void\n+__idxd_perform_ops(int dev_id)\n+{\n+\tstruct rte_idxd_rawdev *idxd =\n+\t\t\t(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;\n+\tstruct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];\n+\n+\tif (b->submitted || b->op_count == 0)\n+\t\treturn;\n+\tb->hdl_end = idxd->next_free_hdl;\n+\tb->comp.status = 0;\n+\tb->submitted = 1;\n+\tb->batch_desc.size = b->op_count + 1;\n+\t__idxd_movdir64b(idxd->portal, 
&b->batch_desc);\n+\n+\tif (++idxd->next_batch == idxd->batch_ring_sz)\n+\t\tidxd->next_batch = 0;\n+}\n+\n+static __rte_always_inline int\n+__idxd_completed_ops(int dev_id, uint8_t max_ops,\n+\t\tuintptr_t *src_hdls, uintptr_t *dst_hdls)\n+{\n+\tstruct rte_idxd_rawdev *idxd =\n+\t\t\t(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;\n+\tstruct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_completed];\n+\tuint16_t h_idx = idxd->next_ret_hdl;\n+\tint n = 0;\n+\n+\twhile (b->submitted && b->comp.status != 0) {\n+\t\tidxd->last_completed_hdl = b->hdl_end;\n+\t\tb->submitted = 0;\n+\t\tb->op_count = 0;\n+\t\tif (++idxd->next_completed == idxd->batch_ring_sz)\n+\t\t\tidxd->next_completed = 0;\n+\t\tb = &idxd->batch_ring[idxd->next_completed];\n+\t}\n+\n+\tif (!idxd->hdls_disable)\n+\t\tfor (n = 0; n < max_ops && h_idx != idxd->last_completed_hdl; n++) {\n+\t\t\tsrc_hdls[n] = idxd->hdl_ring[h_idx].src;\n+\t\t\tdst_hdls[n] = idxd->hdl_ring[h_idx].dst;\n+\t\t\tif (++h_idx == idxd->hdl_ring_sz)\n+\t\t\t\th_idx = 0;\n+\t\t}\n+\telse\n+\t\twhile (h_idx != idxd->last_completed_hdl) {\n+\t\t\tn++;\n+\t\t\tif (++h_idx == idxd->hdl_ring_sz)\n+\t\t\t\th_idx = 0;\n+\t\t}\n+\n+\tidxd->next_ret_hdl = h_idx;\n+\n+\treturn n;\n+}\n+\n+static inline int\n+rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,\n+\t\tunsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)\n+{\n+\tenum rte_ioat_dev_type *type =\n+\t\t\t(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;\n+\tif (*type == RTE_IDXD_DEV)\n+\t\treturn __idxd_enqueue_copy(dev_id, src, dst, length,\n+\t\t\t\tsrc_hdl, dst_hdl);\n+\telse\n+\t\treturn __ioat_enqueue_copy(dev_id, src, dst, length,\n+\t\t\t\tsrc_hdl, dst_hdl);\n+}\n+\n+static inline int\n+rte_ioat_fence(int dev_id)\n+{\n+\tenum rte_ioat_dev_type *type =\n+\t\t\t(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;\n+\tif (*type == RTE_IDXD_DEV)\n+\t\treturn __idxd_fence(dev_id);\n+\telse\n+\t\treturn 
__ioat_fence(dev_id);\n+}\n+\n+static inline void\n+rte_ioat_perform_ops(int dev_id)\n+{\n+\tenum rte_ioat_dev_type *type =\n+\t\t\t(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;\n+\tif (*type == RTE_IDXD_DEV)\n+\t\treturn __idxd_perform_ops(dev_id);\n+\telse\n+\t\treturn __ioat_perform_ops(dev_id);\n+}\n+\n+static inline int\n+rte_ioat_completed_ops(int dev_id, uint8_t max_copies,\n+\t\tuintptr_t *src_hdls, uintptr_t *dst_hdls)\n+{\n+\tenum rte_ioat_dev_type *type =\n+\t\t\t(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;\n+\tif (*type == RTE_IDXD_DEV)\n+\t\treturn __idxd_completed_ops(dev_id, max_copies,\n+\t\t\t\tsrc_hdls, dst_hdls);\n+\telse\n+\t\treturn __ioat_completed_ops(dev_id,  max_copies,\n+\t\t\t\tsrc_hdls, dst_hdls);\n+}\n+\n static inline void\n __rte_deprecated_msg(\"use rte_ioat_perform_ops() instead\")\n rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }\n",
    "prefixes": [
        "v6",
        "19/25"
    ]
}