get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch.
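
As a brief illustration of the GET operation described above, the following is a minimal sketch of retrieving this resource with the third-party Python requests library (an assumption of this example; Patchwork does not ship a client). It returns the same patch object shown in the example response below, and read-only access needs no authentication.

    import requests  # third-party HTTP client, assumed to be installed

    # Fetch the patch resource shown in the example response below.
    resp = requests.get("https://patches.dpdk.org/api/patches/102253/")
    resp.raise_for_status()
    patch = resp.json()

    # A few of the fields visible in the response body.
    print(patch["name"])                   # patch subject
    print(patch["state"], patch["check"])  # review state and CI check result
    print(patch["mbox"])                   # raw patch email, suitable for "git am"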

GET /api/patches/102253/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 102253,
    "url": "https://patches.dpdk.org/api/patches/102253/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20211019141041.1890983-10-kevin.laatz@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211019141041.1890983-10-kevin.laatz@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211019141041.1890983-10-kevin.laatz@intel.com",
    "date": "2021-10-19T14:10:34",
    "name": "[v10,09/16] dma/idxd: add data-path job submission functions",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "9342e0982b6c656c0f183ac59923994a84a0cf13",
    "submitter": {
        "id": 921,
        "url": "https://patches.dpdk.org/api/people/921/?format=api",
        "name": "Kevin Laatz",
        "email": "kevin.laatz@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20211019141041.1890983-10-kevin.laatz@intel.com/mbox/",
    "series": [
        {
            "id": 19797,
            "url": "https://patches.dpdk.org/api/series/19797/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=19797",
            "date": "2021-10-19T14:10:29",
            "name": "add dmadev driver for idxd devices",
            "version": 10,
            "mbox": "https://patches.dpdk.org/series/19797/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/102253/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/102253/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id DEA68A0C41;\n\tTue, 19 Oct 2021 16:30:19 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 7DEA541218;\n\tTue, 19 Oct 2021 16:29:43 +0200 (CEST)",
            "from mga17.intel.com (mga17.intel.com [192.55.52.151])\n by mails.dpdk.org (Postfix) with ESMTP id 5F53C4120B\n for <dev@dpdk.org>; Tue, 19 Oct 2021 16:29:40 +0200 (CEST)",
            "from orsmga006.jf.intel.com ([10.7.209.51])\n by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 19 Oct 2021 07:11:04 -0700",
            "from silpixa00401122.ir.intel.com ([10.55.128.10])\n by orsmga006.jf.intel.com with ESMTP; 19 Oct 2021 07:11:01 -0700"
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10142\"; a=\"209322955\"",
            "E=Sophos;i=\"5.87,164,1631602800\"; d=\"scan'208\";a=\"209322955\"",
            "E=Sophos;i=\"5.87,164,1631602800\"; d=\"scan'208\";a=\"443893616\""
        ],
        "X-ExtLoop1": "1",
        "From": "Kevin Laatz <kevin.laatz@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "thomas@monjalon.net, bruce.richardson@intel.com, fengchengwen@huawei.com,\n jerinj@marvell.com, conor.walsh@intel.com,\n Kevin Laatz <kevin.laatz@intel.com>",
        "Date": "Tue, 19 Oct 2021 14:10:34 +0000",
        "Message-Id": "<20211019141041.1890983-10-kevin.laatz@intel.com>",
        "X-Mailer": "git-send-email 2.30.2",
        "In-Reply-To": "<20211019141041.1890983-1-kevin.laatz@intel.com>",
        "References": "<20210827172048.558704-1-kevin.laatz@intel.com>\n <20211019141041.1890983-1-kevin.laatz@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v10 09/16] dma/idxd: add data-path job submission\n functions",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add data path functions for enqueuing and submitting operations to DSA\ndevices.\n\nDocumentation updates are included for dmadev library and IDXD driver docs\nas appropriate.\n\nSigned-off-by: Bruce Richardson <bruce.richardson@intel.com>\nSigned-off-by: Kevin Laatz <kevin.laatz@intel.com>\nReviewed-by: Conor Walsh <conor.walsh@intel.com>\n---\n doc/guides/dmadevs/idxd.rst      |   9 +++\n doc/guides/prog_guide/dmadev.rst |  19 +++++\n drivers/dma/idxd/idxd_common.c   | 135 +++++++++++++++++++++++++++++++\n drivers/dma/idxd/idxd_internal.h |   5 ++\n drivers/dma/idxd/meson.build     |   1 +\n 5 files changed, 169 insertions(+)",
    "diff": "diff --git a/doc/guides/dmadevs/idxd.rst b/doc/guides/dmadevs/idxd.rst\nindex 711890bd9e..d548c4751a 100644\n--- a/doc/guides/dmadevs/idxd.rst\n+++ b/doc/guides/dmadevs/idxd.rst\n@@ -138,3 +138,12 @@ IDXD configuration requirements:\n \n Once configured, the device can then be made ready for use by calling the\n ``rte_dma_start()`` API.\n+\n+Performing Data Copies\n+~~~~~~~~~~~~~~~~~~~~~~~\n+\n+Refer to the :ref:`Enqueue / Dequeue APIs <dmadev_enqueue_dequeue>` section of the dmadev library\n+documentation for details on operation enqueue and submission API usage.\n+\n+It is expected that, for efficiency reasons, a burst of operations will be enqueued to the\n+device via multiple enqueue calls between calls to the ``rte_dma_submit()`` function.\ndiff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst\nindex 32f7147862..e853ffda3a 100644\n--- a/doc/guides/prog_guide/dmadev.rst\n+++ b/doc/guides/prog_guide/dmadev.rst\n@@ -67,6 +67,8 @@ can be used to get the device info and supported features.\n Silent mode is a special device capability which does not require the\n application to invoke dequeue APIs.\n \n+.. _dmadev_enqueue_dequeue:\n+\n \n Enqueue / Dequeue APIs\n ~~~~~~~~~~~~~~~~~~~~~~\n@@ -80,6 +82,23 @@ The ``rte_dma_submit`` API is used to issue doorbell to hardware.\n Alternatively the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the enqueue\n APIs to also issue the doorbell to hardware.\n \n+The following code demonstrates how to enqueue a burst of copies to the\n+device and start the hardware processing of them:\n+\n+.. code-block:: C\n+\n+   struct rte_mbuf *srcs[DMA_BURST_SZ], *dsts[DMA_BURST_SZ];\n+   unsigned int i;\n+\n+   for (i = 0; i < RTE_DIM(srcs); i++) {\n+      if (rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(srcs),\n+            rte_pktmbuf_iova(dsts), COPY_LEN, 0) < 0) {\n+         PRINT_ERR(\"Error with rte_dma_copy for buffer %u\\n\", i);\n+         return -1;\n+      }\n+   }\n+   rte_dma_submit(dev_id, vchan);\n+\n There are two dequeue APIs ``rte_dma_completed`` and\n ``rte_dma_completed_status``, these are used to obtain the results of the\n enqueue requests. 
``rte_dma_completed`` will return the number of successfully\ndiff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c\nindex b0c79a2e42..a686ad421c 100644\n--- a/drivers/dma/idxd/idxd_common.c\n+++ b/drivers/dma/idxd/idxd_common.c\n@@ -2,14 +2,145 @@\n  * Copyright 2021 Intel Corporation\n  */\n \n+#include <x86intrin.h>\n+\n #include <rte_malloc.h>\n #include <rte_common.h>\n #include <rte_log.h>\n+#include <rte_prefetch.h>\n \n #include \"idxd_internal.h\"\n \n #define IDXD_PMD_NAME_STR \"dmadev_idxd\"\n \n+static __rte_always_inline rte_iova_t\n+__desc_idx_to_iova(struct idxd_dmadev *idxd, uint16_t n)\n+{\n+\treturn idxd->desc_iova + (n * sizeof(struct idxd_hw_desc));\n+}\n+\n+static __rte_always_inline void\n+__idxd_movdir64b(volatile void *dst, const struct idxd_hw_desc *src)\n+{\n+\tasm volatile (\".byte 0x66, 0x0f, 0x38, 0xf8, 0x02\"\n+\t\t\t:\n+\t\t\t: \"a\" (dst), \"d\" (src)\n+\t\t\t: \"memory\");\n+}\n+\n+static __rte_always_inline void\n+__submit(struct idxd_dmadev *idxd)\n+{\n+\trte_prefetch1(&idxd->batch_comp_ring[idxd->batch_idx_read]);\n+\n+\tif (idxd->batch_size == 0)\n+\t\treturn;\n+\n+\t/* write completion to batch comp ring */\n+\trte_iova_t comp_addr = idxd->batch_iova +\n+\t\t\t(idxd->batch_idx_write * sizeof(struct idxd_completion));\n+\n+\tif (idxd->batch_size == 1) {\n+\t\t/* submit batch directly */\n+\t\tstruct idxd_hw_desc desc =\n+\t\t\t\tidxd->desc_ring[idxd->batch_start & idxd->desc_ring_mask];\n+\t\tdesc.completion = comp_addr;\n+\t\tdesc.op_flags |= IDXD_FLAG_REQUEST_COMPLETION;\n+\t\t_mm_sfence(); /* fence before writing desc to device */\n+\t\t__idxd_movdir64b(idxd->portal, &desc);\n+\t} else {\n+\t\tconst struct idxd_hw_desc batch_desc = {\n+\t\t\t\t.op_flags = (idxd_op_batch << IDXD_CMD_OP_SHIFT) |\n+\t\t\t\tIDXD_FLAG_COMPLETION_ADDR_VALID |\n+\t\t\t\tIDXD_FLAG_REQUEST_COMPLETION,\n+\t\t\t\t.desc_addr = __desc_idx_to_iova(idxd,\n+\t\t\t\t\t\tidxd->batch_start & idxd->desc_ring_mask),\n+\t\t\t\t.completion = comp_addr,\n+\t\t\t\t.size = idxd->batch_size,\n+\t\t};\n+\t\t_mm_sfence(); /* fence before writing desc to device */\n+\t\t__idxd_movdir64b(idxd->portal, &batch_desc);\n+\t}\n+\n+\tif (++idxd->batch_idx_write > idxd->max_batches)\n+\t\tidxd->batch_idx_write = 0;\n+\n+\tidxd->batch_start += idxd->batch_size;\n+\tidxd->batch_size = 0;\n+\tidxd->batch_idx_ring[idxd->batch_idx_write] = idxd->batch_start;\n+\t_mm256_store_si256((void *)&idxd->batch_comp_ring[idxd->batch_idx_write],\n+\t\t\t_mm256_setzero_si256());\n+}\n+\n+static __rte_always_inline int\n+__idxd_write_desc(struct idxd_dmadev *idxd,\n+\t\tconst uint32_t op_flags,\n+\t\tconst rte_iova_t src,\n+\t\tconst rte_iova_t dst,\n+\t\tconst uint32_t size,\n+\t\tconst uint32_t flags)\n+{\n+\tuint16_t mask = idxd->desc_ring_mask;\n+\tuint16_t job_id = idxd->batch_start + idxd->batch_size;\n+\t/* we never wrap batches, so we only mask the start and allow start+size to overflow */\n+\tuint16_t write_idx = (idxd->batch_start & mask) + idxd->batch_size;\n+\n+\t/* first check batch ring space then desc ring space */\n+\tif ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||\n+\t\t\tidxd->batch_idx_write + 1 == idxd->batch_idx_read)\n+\t\treturn -ENOSPC;\n+\tif (((write_idx + 1) & mask) == (idxd->ids_returned & mask))\n+\t\treturn -ENOSPC;\n+\n+\t/* write desc. 
Note: descriptors don't wrap, but the completion address does */\n+\tconst uint64_t op_flags64 = (uint64_t)(op_flags | IDXD_FLAG_COMPLETION_ADDR_VALID) << 32;\n+\tconst uint64_t comp_addr = __desc_idx_to_iova(idxd, write_idx & mask);\n+\t_mm256_store_si256((void *)&idxd->desc_ring[write_idx],\n+\t\t\t_mm256_set_epi64x(dst, src, comp_addr, op_flags64));\n+\t_mm256_store_si256((void *)&idxd->desc_ring[write_idx].size,\n+\t\t\t_mm256_set_epi64x(0, 0, 0, size));\n+\n+\tidxd->batch_size++;\n+\n+\trte_prefetch0_write(&idxd->desc_ring[write_idx + 1]);\n+\n+\tif (flags & RTE_DMA_OP_FLAG_SUBMIT)\n+\t\t__submit(idxd);\n+\n+\treturn job_id;\n+}\n+\n+int\n+idxd_enqueue_copy(void *dev_private, uint16_t qid __rte_unused, rte_iova_t src,\n+\t\trte_iova_t dst, unsigned int length, uint64_t flags)\n+{\n+\t/* we can take advantage of the fact that the fence flag in dmadev and DSA are the same,\n+\t * but check it at compile time to be sure.\n+\t */\n+\tRTE_BUILD_BUG_ON(RTE_DMA_OP_FLAG_FENCE != IDXD_FLAG_FENCE);\n+\tuint32_t memmove = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |\n+\t\t\tIDXD_FLAG_CACHE_CONTROL | (flags & IDXD_FLAG_FENCE);\n+\treturn __idxd_write_desc(dev_private, memmove, src, dst, length,\n+\t\t\tflags);\n+}\n+\n+int\n+idxd_enqueue_fill(void *dev_private, uint16_t qid __rte_unused, uint64_t pattern,\n+\t\trte_iova_t dst, unsigned int length, uint64_t flags)\n+{\n+\tuint32_t fill = (idxd_op_fill << IDXD_CMD_OP_SHIFT) |\n+\t\t\tIDXD_FLAG_CACHE_CONTROL | (flags & IDXD_FLAG_FENCE);\n+\treturn __idxd_write_desc(dev_private, fill, pattern, dst, length,\n+\t\t\tflags);\n+}\n+\n+int\n+idxd_submit(void *dev_private, uint16_t qid __rte_unused)\n+{\n+\t__submit(dev_private);\n+\treturn 0;\n+}\n+\n int\n idxd_dump(const struct rte_dma_dev *dev, FILE *f)\n {\n@@ -139,6 +270,10 @@ idxd_dmadev_create(const char *name, struct rte_device *dev,\n \tdmadev->dev_ops = ops;\n \tdmadev->device = dev;\n \n+\tdmadev->fp_obj->copy = idxd_enqueue_copy;\n+\tdmadev->fp_obj->fill = idxd_enqueue_fill;\n+\tdmadev->fp_obj->submit = idxd_submit;\n+\n \tidxd = dmadev->data->dev_private;\n \t*idxd = *base_idxd; /* copy over the main fields already passed in */\n \tidxd->dmadev = dmadev;\ndiff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h\nindex 1dbe31abcd..ab4d71095e 100644\n--- a/drivers/dma/idxd/idxd_internal.h\n+++ b/drivers/dma/idxd/idxd_internal.h\n@@ -87,5 +87,10 @@ int idxd_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,\n \t\tconst struct rte_dma_vchan_conf *qconf, uint32_t qconf_sz);\n int idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,\n \t\tuint32_t size);\n+int idxd_enqueue_copy(void *dev_private, uint16_t qid, rte_iova_t src,\n+\t\trte_iova_t dst, unsigned int length, uint64_t flags);\n+int idxd_enqueue_fill(void *dev_private, uint16_t qid, uint64_t pattern,\n+\t\trte_iova_t dst, unsigned int length, uint64_t flags);\n+int idxd_submit(void *dev_private, uint16_t qid);\n \n #endif /* _IDXD_INTERNAL_H_ */\ndiff --git a/drivers/dma/idxd/meson.build b/drivers/dma/idxd/meson.build\nindex 37af6e1b8f..fdfce81a94 100644\n--- a/drivers/dma/idxd/meson.build\n+++ b/drivers/dma/idxd/meson.build\n@@ -5,6 +5,7 @@ build = dpdk_conf.has('RTE_ARCH_X86')\n reason = 'only supported on x86'\n \n deps += ['bus_pci']\n+cflags += '-mavx2' # all platforms with idxd HW support AVX\n sources = files(\n         'idxd_common.c',\n         'idxd_pci.c'\n",
    "prefixes": [
        "v10",
        "09/16"
    ]
}
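
As a companion sketch for the PATCH operation described at the top of this page, the following shows a partial update using the same third-party Python requests library. The token value is a placeholder, and fields such as "state", "archived" and "delegate" can generally only be changed by accounts with maintainer rights on the project, so treat this as an illustrative sketch rather than a request guaranteed to succeed.

    import requests  # third-party HTTP client, assumed to be installed

    URL = "https://patches.dpdk.org/api/patches/102253/"
    # Placeholder token for illustration only; PATCH and PUT require an
    # authenticated account with sufficient rights on the project.
    HEADERS = {"Authorization": "Token 0123456789abcdef0123456789abcdef"}

    # PATCH is a partial update: only the fields included in the body change.
    resp = requests.patch(URL, headers=HEADERS, json={"archived": True})
    resp.raise_for_status()
    print(resp.json()["archived"])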