get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch (full update).
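
These endpoints can also be driven programmatically. The snippet below is an
illustrative sketch only: it assumes the Python "requests" package and an API
token exported as PW_TOKEN (write access additionally requires maintainer
rights on the project); neither assumption comes from this page.

    import os
    import requests

    URL = "https://patches.dpdk.org/api/patches/97892/"

    # get: show the patch as JSON
    patch = requests.get(URL).json()
    print(patch["name"], patch["state"])

    # patch: partial update of selected fields, e.g. the state
    token = os.environ["PW_TOKEN"]
    resp = requests.patch(URL,
                          headers={"Authorization": f"Token {token}"},
                          json={"state": "accepted"})
    resp.raise_for_status()

The response to a plain GET on this resource is shown below.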

GET /api/patches/97892/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 97892,
    "url": "https://patches.dpdk.org/api/patches/97892/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210903105001.1179328-12-kevin.laatz@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210903105001.1179328-12-kevin.laatz@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210903105001.1179328-12-kevin.laatz@intel.com",
    "date": "2021-09-03T10:49:56",
    "name": "[v2,11/16] dma/idxd: add data-path job completion functions",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "ada4fa7a2574ee49efbf19b691be3a66e39a1bbe",
    "submitter": {
        "id": 921,
        "url": "https://patches.dpdk.org/api/people/921/?format=api",
        "name": "Kevin Laatz",
        "email": "kevin.laatz@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210903105001.1179328-12-kevin.laatz@intel.com/mbox/",
    "series": [
        {
            "id": 18658,
            "url": "https://patches.dpdk.org/api/series/18658/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=18658",
            "date": "2021-09-03T10:49:45",
            "name": "add dmadev driver for idxd devices",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/18658/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/97892/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/97892/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id BF8C0A0C54;\n\tFri,  3 Sep 2021 12:51:21 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id A561D4111B;\n\tFri,  3 Sep 2021 12:50:33 +0200 (CEST)",
            "from mga11.intel.com (mga11.intel.com [192.55.52.93])\n by mails.dpdk.org (Postfix) with ESMTP id BBEAA41153\n for <dev@dpdk.org>; Fri,  3 Sep 2021 12:50:31 +0200 (CEST)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 03 Sep 2021 03:50:31 -0700",
            "from silpixa00401122.ir.intel.com ([10.55.128.10])\n by fmsmga004.fm.intel.com with ESMTP; 03 Sep 2021 03:50:29 -0700"
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10095\"; a=\"216247399\"",
            "E=Sophos;i=\"5.85,265,1624345200\"; d=\"scan'208\";a=\"216247399\"",
            "E=Sophos;i=\"5.85,265,1624345200\"; d=\"scan'208\";a=\"521643596\""
        ],
        "X-ExtLoop1": "1",
        "From": "Kevin Laatz <kevin.laatz@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "bruce.richardson@intel.com, fengchengwen@huawei.com, jerinj@marvell.com,\n conor.walsh@intel.com, Kevin Laatz <kevin.laatz@intel.com>",
        "Date": "Fri,  3 Sep 2021 10:49:56 +0000",
        "Message-Id": "<20210903105001.1179328-12-kevin.laatz@intel.com>",
        "X-Mailer": "git-send-email 2.30.2",
        "In-Reply-To": "<20210903105001.1179328-1-kevin.laatz@intel.com>",
        "References": "<20210827172048.558704-1-kevin.laatz@intel.com>\n <20210903105001.1179328-1-kevin.laatz@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v2 11/16] dma/idxd: add data-path job completion\n functions",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add the data path functions for gathering completed operations.\n\nSigned-off-by: Bruce Richardson <bruce.richardson@intel.com>\nSigned-off-by: Kevin Laatz <kevin.laatz@intel.com>\n\n---\nv2:\n   - fixed typo in docs\n   - add completion status for invalid opcode\n---\n doc/guides/dmadevs/idxd.rst      |  25 ++++\n drivers/dma/idxd/idxd_common.c   | 237 +++++++++++++++++++++++++++++++\n drivers/dma/idxd/idxd_internal.h |   5 +\n 3 files changed, 267 insertions(+)",
    "diff": "diff --git a/doc/guides/dmadevs/idxd.rst b/doc/guides/dmadevs/idxd.rst\nindex 0c4c105e0f..b0b5632b48 100644\n--- a/doc/guides/dmadevs/idxd.rst\n+++ b/doc/guides/dmadevs/idxd.rst\n@@ -209,6 +209,31 @@ device and start the hardware processing of them:\n    }\n    rte_dmadev_submit(dev_id, vchan);\n \n+To retrieve information about completed copies, ``rte_dmadev_completed()`` and\n+``rte_dmadev_completed_status()`` APIs should be used. ``rte_dmadev_completed()``\n+will return the number of completed operations, along with the index of the last\n+successful completed operation and whether or not an error was encountered. If an\n+error was encountered, ``rte_dmadev_completed_status()`` must be used to kick the\n+device off to continue processing operations and also to gather the status of each\n+individual operations which is filled in to the ``status`` array provided as\n+parameter by the application.\n+\n+The following code shows how to retrieve the number of successfully completed\n+copies within a burst and then using ``rte_dmadev_completed_status()`` to check\n+which operation failed and kick off the device to continue processing operations:\n+\n+.. code-block:: C\n+\n+   enum rte_dma_status_code status[COMP_BURST_SZ];\n+   uint16_t count, idx, status_count;\n+   bool error = 0;\n+\n+   count = rte_dmadev_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);\n+\n+   if (error){\n+      status_count = rte_dmadev_completed_status(dev_id, vchan, COMP_BURST_SZ, &idx, status);\n+   }\n+\n Filling an Area of Memory\n ~~~~~~~~~~~~~~~~~~~~~~~~~~\n \ndiff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c\nindex d72e83537d..1bbe313c09 100644\n--- a/drivers/dma/idxd/idxd_common.c\n+++ b/drivers/dma/idxd/idxd_common.c\n@@ -143,6 +143,241 @@ idxd_submit(struct rte_dmadev *dev, uint16_t qid __rte_unused)\n \treturn 0;\n }\n \n+static enum rte_dma_status_code\n+get_comp_status(struct idxd_completion *c)\n+{\n+\tuint8_t st = c->status;\n+\tswitch (st) {\n+\t/* successful descriptors are not written back normally */\n+\tcase IDXD_COMP_STATUS_INCOMPLETE:\n+\tcase IDXD_COMP_STATUS_SUCCESS:\n+\t\treturn RTE_DMA_STATUS_SUCCESSFUL;\n+\tcase IDXD_COMP_STATUS_INVALID_OPCODE:\n+\t\treturn RTE_DMA_STATUS_INVALID_OPCODE;\n+\tcase IDXD_COMP_STATUS_INVALID_SIZE:\n+\t\treturn RTE_DMA_STATUS_INVALID_LENGTH;\n+\tcase IDXD_COMP_STATUS_SKIPPED:\n+\t\treturn RTE_DMA_STATUS_NOT_ATTEMPTED;\n+\tdefault:\n+\t\treturn RTE_DMA_STATUS_ERROR_UNKNOWN;\n+\t}\n+}\n+\n+static __rte_always_inline int\n+batch_ok(struct idxd_dmadev *idxd, uint8_t max_ops, enum rte_dma_status_code *status)\n+{\n+\tuint16_t ret;\n+\tuint8_t bstatus;\n+\n+\tif (max_ops == 0)\n+\t\treturn 0;\n+\n+\t/* first check if there are any unreturned handles from last time */\n+\tif (idxd->ids_avail != idxd->ids_returned) {\n+\t\tret = RTE_MIN((uint16_t)(idxd->ids_avail - idxd->ids_returned), max_ops);\n+\t\tidxd->ids_returned += ret;\n+\t\tif (status)\n+\t\t\tmemset(status, RTE_DMA_STATUS_SUCCESSFUL, ret * sizeof(*status));\n+\t\treturn ret;\n+\t}\n+\n+\tif (idxd->batch_idx_read == idxd->batch_idx_write)\n+\t\treturn 0;\n+\n+\tbstatus = idxd->batch_comp_ring[idxd->batch_idx_read].status;\n+\t/* now check if next batch is complete and successful */\n+\tif (bstatus == IDXD_COMP_STATUS_SUCCESS) {\n+\t\t/* since the batch idx ring stores the start of each batch, pre-increment to lookup\n+\t\t * start of next batch.\n+\t\t */\n+\t\tif (++idxd->batch_idx_read > idxd->max_batches)\n+\t\t\tidxd->batch_idx_read = 0;\n+\t\tidxd->ids_avail = 
idxd->batch_idx_ring[idxd->batch_idx_read];\n+\n+\t\tret = RTE_MIN((uint16_t)(idxd->ids_avail - idxd->ids_returned), max_ops);\n+\t\tidxd->ids_returned += ret;\n+\t\tif (status)\n+\t\t\tmemset(status, RTE_DMA_STATUS_SUCCESSFUL, ret * sizeof(*status));\n+\t\treturn ret;\n+\t}\n+\t/* check if batch is incomplete */\n+\telse if (bstatus == IDXD_COMP_STATUS_INCOMPLETE)\n+\t\treturn 0;\n+\n+\treturn -1; /* error case */\n+}\n+\n+static inline uint16_t\n+batch_completed(struct idxd_dmadev *idxd, uint8_t max_ops, bool *has_error)\n+{\n+\tuint16_t i;\n+\tuint16_t b_start, b_end, next_batch;\n+\n+\tint ret = batch_ok(idxd, max_ops, NULL);\n+\tif (ret >= 0)\n+\t\treturn ret;\n+\n+\t/* ERROR case, not successful, not incomplete */\n+\t/* Get the batch size, and special case size 1.\n+\t * once we identify the actual failure job, return other jobs, then update\n+\t * the batch ring indexes to make it look like the first job of the batch has failed.\n+\t * Subsequent calls here will always return zero packets, and the error must be cleared by\n+\t * calling the completed_status() function.\n+\t */\n+\tnext_batch = (idxd->batch_idx_read + 1);\n+\tif (next_batch > idxd->max_batches)\n+\t\tnext_batch = 0;\n+\tb_start = idxd->batch_idx_ring[idxd->batch_idx_read];\n+\tb_end = idxd->batch_idx_ring[next_batch];\n+\n+\tif (b_end - b_start == 1) { /* not a batch */\n+\t\t*has_error = true;\n+\t\treturn 0;\n+\t}\n+\n+\tfor (i = b_start; i < b_end; i++) {\n+\t\tstruct idxd_completion *c = (void *)&idxd->desc_ring[i & idxd->desc_ring_mask];\n+\t\tif (c->status > IDXD_COMP_STATUS_SUCCESS) /* ignore incomplete(0) and success(1) */\n+\t\t\tbreak;\n+\t}\n+\tret = RTE_MIN((uint16_t)(i - idxd->ids_returned), max_ops);\n+\tif (ret < max_ops)\n+\t\t*has_error = true; /* we got up to the point of error */\n+\tidxd->ids_avail = idxd->ids_returned += ret;\n+\n+\t/* to ensure we can call twice and just return 0, set start of batch to where we finished */\n+\tidxd->batch_comp_ring[idxd->batch_idx_read].completed_size -= ret;\n+\tidxd->batch_idx_ring[idxd->batch_idx_read] += ret;\n+\tif (idxd->batch_idx_ring[next_batch] - idxd->batch_idx_ring[idxd->batch_idx_read] == 1) {\n+\t\t/* copy over the descriptor status to the batch ring as if no batch */\n+\t\tuint16_t d_idx = idxd->batch_idx_ring[idxd->batch_idx_read] & idxd->desc_ring_mask;\n+\t\tstruct idxd_completion *desc_comp = (void *)&idxd->desc_ring[d_idx];\n+\t\tidxd->batch_comp_ring[idxd->batch_idx_read].status = desc_comp->status;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static uint16_t\n+batch_completed_status(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_status_code *status)\n+{\n+\tuint16_t next_batch;\n+\n+\tint ret = batch_ok(idxd, max_ops, status);\n+\tif (ret >= 0)\n+\t\treturn ret;\n+\n+\t/* ERROR case, not successful, not incomplete */\n+\t/* Get the batch size, and special case size 1.\n+\t */\n+\tnext_batch = (idxd->batch_idx_read + 1);\n+\tif (next_batch > idxd->max_batches)\n+\t\tnext_batch = 0;\n+\tconst uint16_t b_start = idxd->batch_idx_ring[idxd->batch_idx_read];\n+\tconst uint16_t b_end = idxd->batch_idx_ring[next_batch];\n+\tconst uint16_t b_len = b_end - b_start;\n+\tif (b_len == 1) {/* not a batch */\n+\t\t*status = get_comp_status(&idxd->batch_comp_ring[idxd->batch_idx_read]);\n+\t\tidxd->ids_avail++;\n+\t\tidxd->ids_returned++;\n+\t\tidxd->batch_idx_read = next_batch;\n+\t\treturn 1;\n+\t}\n+\n+\t/* not a single-element batch, need to process more.\n+\t * Scenarios:\n+\t * 1. 
max_ops >= batch_size - can fit everything, simple case\n+\t *   - loop through completed ops and then add on any not-attempted ones\n+\t * 2. max_ops < batch_size - can't fit everything, more complex case\n+\t *   - loop through completed/incomplete and stop when hit max_ops\n+\t *   - adjust the batch descriptor to update where we stopped, with appropriate bcount\n+\t *   - if bcount is to be exactly 1, update the batch descriptor as it will be treated as\n+\t *     non-batch next time.\n+\t */\n+\tconst uint16_t bcount = idxd->batch_comp_ring[idxd->batch_idx_read].completed_size;\n+\tfor (ret = 0; ret < b_len && ret < max_ops; ret++) {\n+\t\tstruct idxd_completion *c = (void *)\n+\t\t\t\t&idxd->desc_ring[(b_start + ret) & idxd->desc_ring_mask];\n+\t\tstatus[ret] = (ret < bcount) ? get_comp_status(c) : RTE_DMA_STATUS_NOT_ATTEMPTED;\n+\t}\n+\tidxd->ids_avail = idxd->ids_returned += ret;\n+\n+\t/* everything fit */\n+\tif (ret == b_len) {\n+\t\tidxd->batch_idx_read = next_batch;\n+\t\treturn ret;\n+\t}\n+\n+\t/* set up for next time, update existing batch descriptor & start idx at batch_idx_read */\n+\tidxd->batch_idx_ring[idxd->batch_idx_read] += ret;\n+\tif (ret > bcount) {\n+\t\t/* we have only incomplete ones - set batch completed size to 0 */\n+\t\tstruct idxd_completion *comp = &idxd->batch_comp_ring[idxd->batch_idx_read];\n+\t\tcomp->completed_size = 0;\n+\t\t/* if there is only one descriptor left, job skipped so set flag appropriately */\n+\t\tif (b_len - ret == 1)\n+\t\t\tcomp->status = IDXD_COMP_STATUS_SKIPPED;\n+\t} else {\n+\t\tstruct idxd_completion *comp = &idxd->batch_comp_ring[idxd->batch_idx_read];\n+\t\tcomp->completed_size -= ret;\n+\t\t/* if there is only one descriptor left, copy status info straight to desc */\n+\t\tif (comp->completed_size == 1) {\n+\t\t\tstruct idxd_completion *c = (void *)\n+\t\t\t\t\t&idxd->desc_ring[(b_start + ret) & idxd->desc_ring_mask];\n+\t\t\tcomp->status = c->status;\n+\t\t\t/* individual descs can be ok without writeback, but not batches */\n+\t\t\tif (comp->status == IDXD_COMP_STATUS_INCOMPLETE)\n+\t\t\t\tcomp->status = IDXD_COMP_STATUS_SUCCESS;\n+\t\t} else if (bcount == b_len) {\n+\t\t\t/* check if we still have an error, and clear flag if not */\n+\t\t\tuint16_t i;\n+\t\t\tfor (i = b_start + ret; i < b_end; i++) {\n+\t\t\t\tstruct idxd_completion *c = (void *)\n+\t\t\t\t\t\t&idxd->desc_ring[i & idxd->desc_ring_mask];\n+\t\t\t\tif (c->status > IDXD_COMP_STATUS_SUCCESS)\n+\t\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tif (i == b_end) /* no errors */\n+\t\t\t\tcomp->status = IDXD_COMP_STATUS_SUCCESS;\n+\t\t}\n+\t}\n+\n+\treturn ret;\n+}\n+\n+uint16_t\n+idxd_completed(struct rte_dmadev *dev, uint16_t qid __rte_unused, uint16_t max_ops,\n+\t\tuint16_t *last_idx, bool *has_error)\n+{\n+\tstruct idxd_dmadev *idxd = dev->dev_private;\n+\tuint16_t batch, ret = 0;\n+\n+\tdo {\n+\t\tbatch = batch_completed(idxd, max_ops - ret, has_error);\n+\t\tret += batch;\n+\t} while (batch > 0 && *has_error == false);\n+\n+\t*last_idx = idxd->ids_returned - 1;\n+\treturn ret;\n+}\n+\n+uint16_t\n+idxd_completed_status(struct rte_dmadev *dev, uint16_t qid __rte_unused, uint16_t max_ops,\n+\t\tuint16_t *last_idx, enum rte_dma_status_code *status)\n+{\n+\tstruct idxd_dmadev *idxd = dev->dev_private;\n+\n+\tuint16_t batch, ret = 0;\n+\n+\tdo {\n+\t\tbatch = batch_completed_status(idxd, max_ops - ret, &status[ret]);\n+\t\tret += batch;\n+\t} while (batch > 0);\n+\n+\t*last_idx = idxd->ids_returned - 1;\n+\treturn ret;\n+}\n+\n int\n idxd_dump(const struct rte_dmadev *dev, 
FILE *f)\n {\n@@ -270,6 +505,8 @@ idxd_dmadev_create(const char *name, struct rte_device *dev,\n \tdmadev->copy = idxd_enqueue_copy;\n \tdmadev->fill = idxd_enqueue_fill;\n \tdmadev->submit = idxd_submit;\n+\tdmadev->completed = idxd_completed;\n+\tdmadev->completed_status = idxd_completed_status;\n \n \tidxd = rte_malloc_socket(NULL, sizeof(struct idxd_dmadev), 0, dev->numa_node);\n \tif (idxd == NULL) {\ndiff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h\nindex 6a6c69fd61..4bcfe5372b 100644\n--- a/drivers/dma/idxd/idxd_internal.h\n+++ b/drivers/dma/idxd/idxd_internal.h\n@@ -90,5 +90,10 @@ int idxd_enqueue_copy(struct rte_dmadev *dev, uint16_t qid, rte_iova_t src,\n int idxd_enqueue_fill(struct rte_dmadev *dev, uint16_t qid, uint64_t pattern,\n \t\trte_iova_t dst, unsigned int length, uint64_t flags);\n int idxd_submit(struct rte_dmadev *dev, uint16_t qid);\n+uint16_t idxd_completed(struct rte_dmadev *dev, uint16_t qid, uint16_t max_ops,\n+\t\tuint16_t *last_idx, bool *has_error);\n+uint16_t idxd_completed_status(struct rte_dmadev *dev, uint16_t qid __rte_unused,\n+\t\tuint16_t max_ops, uint16_t *last_idx,\n+\t\tenum rte_dma_status_code *status);\n \n #endif /* _IDXD_INTERNAL_H_ */\n",
    "prefixes": [
        "v2",
        "11/16"
    ]
}
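
The response above links to several related resources: the raw patch ("mbox"),
its "comments", CI "checks", and the parent "series". A minimal follow-up
sketch, again assuming the Python "requests" package (these endpoints are
readable without authentication):

    import requests

    patch = requests.get("https://patches.dpdk.org/api/patches/97892/").json()

    # Fetch the patch in mbox form, suitable for "git am"
    mbox = requests.get(patch["mbox"]).text

    # List individual CI check results; the "check" field above ("success")
    # is the aggregate of these.
    for check in requests.get(patch["checks"]).json():
        print(check["context"], check["state"])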