get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/86966/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 86966,
    "url": "https://patches.dpdk.org/api/patches/86966/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1611142175-409485-9-git-send-email-matan@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1611142175-409485-9-git-send-email-matan@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1611142175-409485-9-git-send-email-matan@nvidia.com",
    "date": "2021-01-20T11:29:32",
    "name": "[v3,08/11] compress/mlx5: add data-path functions",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "de83d2328c39a21aaef4023283d52edec70e487b",
    "submitter": {
        "id": 1911,
        "url": "https://patches.dpdk.org/api/people/1911/?format=api",
        "name": "Matan Azrad",
        "email": "matan@nvidia.com"
    },
    "delegate": {
        "id": 6690,
        "url": "https://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1611142175-409485-9-git-send-email-matan@nvidia.com/mbox/",
    "series": [
        {
            "id": 14865,
            "url": "https://patches.dpdk.org/api/series/14865/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=14865",
            "date": "2021-01-20T11:29:24",
            "name": "add mlx5 compress PMD",
            "version": 3,
            "mbox": "https://patches.dpdk.org/series/14865/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/86966/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/86966/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 27A80A0A05;\n\tWed, 20 Jan 2021 12:34:41 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id EBA37140D5D;\n\tWed, 20 Jan 2021 12:34:38 +0100 (CET)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n by mails.dpdk.org (Postfix) with ESMTP id C76EA140D5B\n for <dev@dpdk.org>; Wed, 20 Jan 2021 12:34:37 +0100 (CET)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n matan@nvidia.com) with SMTP; 20 Jan 2021 13:34:37 +0200",
            "from pegasus25.mtr.labs.mlnx. (pegasus25.mtr.labs.mlnx\n [10.210.16.10])\n by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 10KBTfXF001381;\n Wed, 20 Jan 2021 13:34:37 +0200"
        ],
        "From": "Matan Azrad <matan@nvidia.com>",
        "To": "dev@dpdk.org",
        "Cc": "Thomas Monjalon <thomas@monjalon.net>,\n Ashish Gupta <ashish.gupta@marvell.com>,\n Fiona Trahe <fiona.trahe@intel.com>, akhil.goyal@nxp.com",
        "Date": "Wed, 20 Jan 2021 11:29:32 +0000",
        "Message-Id": "<1611142175-409485-9-git-send-email-matan@nvidia.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1611142175-409485-1-git-send-email-matan@nvidia.com>",
        "References": "<1610554690-411627-1-git-send-email-matan@nvidia.com>\n <1611142175-409485-1-git-send-email-matan@nvidia.com>",
        "Subject": "[dpdk-dev] [PATCH v3 08/11] compress/mlx5: add data-path functions",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add implementation for the next compress data-path functions:\n\t- dequeue_burst\n\t- enqueue_burst\n\nAdd the next operation for starting \\ stopping data-path:\n\t- dev_stop\n\t- dev_close\n\nEach compress API enqueued operation is translated to a WQE.\nOnce WQE is done, the HW sends CQE to the CQ, when SW see the CQE the\noperation will be updated and dequeued.\n\nSigned-off-by: Matan Azrad <matan@nvidia.com>\nAcked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>\n---\n drivers/compress/mlx5/mlx5_compress.c | 219 +++++++++++++++++++++++++++++++++-\n 1 file changed, 215 insertions(+), 4 deletions(-)",
    "diff": "diff --git a/drivers/compress/mlx5/mlx5_compress.c b/drivers/compress/mlx5/mlx5_compress.c\nindex 467b80a..17aa206 100644\n--- a/drivers/compress/mlx5/mlx5_compress.c\n+++ b/drivers/compress/mlx5/mlx5_compress.c\n@@ -18,6 +18,7 @@\n #include <mlx5_common_os.h>\n #include <mlx5_common_devx.h>\n #include <mlx5_common_mr.h>\n+#include <mlx5_common_os.h>\n #include <mlx5_prm.h>\n \n #include \"mlx5_compress_utils.h\"\n@@ -348,10 +349,23 @@ struct mlx5_compress_qp {\n \treturn -ENOTSUP;\n }\n \n+static void\n+mlx5_compress_dev_stop(struct rte_compressdev *dev)\n+{\n+\tRTE_SET_USED(dev);\n+}\n+\n+static int\n+mlx5_compress_dev_start(struct rte_compressdev *dev)\n+{\n+\tRTE_SET_USED(dev);\n+\treturn 0;\n+}\n+\n static struct rte_compressdev_ops mlx5_compress_ops = {\n \t.dev_configure\t\t= mlx5_compress_dev_configure,\n-\t.dev_start\t\t= NULL,\n-\t.dev_stop\t\t= NULL,\n+\t.dev_start\t\t= mlx5_compress_dev_start,\n+\t.dev_stop\t\t= mlx5_compress_dev_stop,\n \t.dev_close\t\t= mlx5_compress_dev_close,\n \t.dev_infos_get\t\t= mlx5_compress_dev_info_get,\n \t.stats_get\t\t= NULL,\n@@ -364,6 +378,203 @@ struct mlx5_compress_qp {\n \t.stream_free\t\t= NULL,\n };\n \n+static __rte_always_inline uint32_t\n+mlx5_compress_dseg_set(struct mlx5_compress_qp *qp,\n+\t\t       volatile struct mlx5_wqe_dseg *restrict dseg,\n+\t\t       struct rte_mbuf *restrict mbuf,\n+\t\t       uint32_t offset, uint32_t len)\n+{\n+\tuintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);\n+\n+\tdseg->bcount = rte_cpu_to_be_32(len);\n+\tdseg->lkey = mlx5_mr_addr2mr_bh(qp->priv->pd, 0, &qp->priv->mr_scache,\n+\t\t\t\t\t&qp->mr_ctrl, addr,\n+\t\t\t\t\t!!(mbuf->ol_flags & EXT_ATTACHED_MBUF));\n+\tdseg->pbuf = rte_cpu_to_be_64(addr);\n+\treturn dseg->lkey;\n+}\n+\n+static uint16_t\n+mlx5_compress_enqueue_burst(void *queue_pair, struct rte_comp_op **ops,\n+\t\t\t    uint16_t nb_ops)\n+{\n+\tstruct mlx5_compress_qp *qp = queue_pair;\n+\tvolatile struct mlx5_gga_wqe *wqes = (volatile 
struct mlx5_gga_wqe *)\n+\t\t\t\t\t\t\t      qp->sq.wqes, *wqe;\n+\tstruct mlx5_compress_xform *xform;\n+\tstruct rte_comp_op *op;\n+\tuint16_t mask = qp->entries_n - 1;\n+\tuint16_t remain = qp->entries_n - (qp->pi - qp->ci);\n+\tuint16_t idx;\n+\tbool invalid;\n+\n+\tif (remain < nb_ops)\n+\t\tnb_ops = remain;\n+\telse\n+\t\tremain = nb_ops;\n+\tif (unlikely(remain == 0))\n+\t\treturn 0;\n+\tdo {\n+\t\tidx = qp->pi & mask;\n+\t\twqe = &wqes[idx];\n+\t\trte_prefetch0(&wqes[(qp->pi + 1) & mask]);\n+\t\top = *ops++;\n+\t\txform = op->private_xform;\n+\t\t/*\n+\t\t * Check operation arguments and error cases:\n+\t\t *   - Operation type must be state-less.\n+\t\t *   - Compress operation flush flag must be FULL or FINAL.\n+\t\t *   - Source and destination buffers must be mapped internally.\n+\t\t */\n+\t\tinvalid = op->op_type != RTE_COMP_OP_STATELESS ||\n+\t\t\t\t\t    (xform->type == RTE_COMP_COMPRESS &&\n+\t\t\t\t\t  op->flush_flag < RTE_COMP_FLUSH_FULL);\n+\t\tif (unlikely(invalid ||\n+\t\t\t     (mlx5_compress_dseg_set(qp, &wqe->gather,\n+\t\t\t\t\t\t     op->m_src,\n+\t\t\t\t\t\t     op->src.offset,\n+\t\t\t\t\t\t     op->src.length) ==\n+\t\t\t\t\t\t\t\t  UINT32_MAX) ||\n+\t\t\t     (mlx5_compress_dseg_set(qp, &wqe->scatter,\n+\t\t\t\t\t\top->m_dst,\n+\t\t\t\t\t\top->dst.offset,\n+\t\t\t\t\t\trte_pktmbuf_pkt_len(op->m_dst) -\n+\t\t\t\t\t\t\t      op->dst.offset) ==\n+\t\t\t\t\t\t\t\t UINT32_MAX))) {\n+\t\t\top->status = invalid ? 
RTE_COMP_OP_STATUS_INVALID_ARGS :\n+\t\t\t\t\t\t       RTE_COMP_OP_STATUS_ERROR;\n+\t\t\tnb_ops -= remain;\n+\t\t\tif (unlikely(nb_ops == 0))\n+\t\t\t\treturn 0;\n+\t\t\tbreak;\n+\t\t}\n+\t\twqe->gga_ctrl1 = xform->gga_ctrl1;\n+\t\twqe->opcode = rte_cpu_to_be_32(xform->opcode + (qp->pi << 8));\n+\t\tqp->ops[idx] = op;\n+\t\tqp->pi++;\n+\t} while (--remain);\n+\trte_io_wmb();\n+\tqp->sq.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->pi);\n+\trte_wmb();\n+\t*qp->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH.*/\n+\trte_wmb();\n+\treturn nb_ops;\n+}\n+\n+static void\n+mlx5_compress_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe,\n+\t\t\t     volatile uint32_t *opaq)\n+{\n+\tsize_t i;\n+\n+\tDRV_LOG(ERR, \"Error cqe:\");\n+\tfor (i = 0; i < sizeof(struct mlx5_err_cqe) >> 2; i += 4)\n+\t\tDRV_LOG(ERR, \"%08X %08X %08X %08X\", cqe[i], cqe[i + 1],\n+\t\t\tcqe[i + 2], cqe[i + 3]);\n+\tDRV_LOG(ERR, \"\\nError wqe:\");\n+\tfor (i = 0; i < sizeof(struct mlx5_gga_wqe) >> 2; i += 4)\n+\t\tDRV_LOG(ERR, \"%08X %08X %08X %08X\", wqe[i], wqe[i + 1],\n+\t\t\twqe[i + 2], wqe[i + 3]);\n+\tDRV_LOG(ERR, \"\\nError opaq:\");\n+\tfor (i = 0; i < sizeof(struct mlx5_gga_compress_opaque) >> 2; i += 4)\n+\t\tDRV_LOG(ERR, \"%08X %08X %08X %08X\", opaq[i], opaq[i + 1],\n+\t\t\topaq[i + 2], opaq[i + 3]);\n+}\n+\n+static void\n+mlx5_compress_cqe_err_handle(struct mlx5_compress_qp *qp,\n+\t\t\t     struct rte_comp_op *op)\n+{\n+\tconst uint32_t idx = qp->ci & (qp->entries_n - 1);\n+\tvolatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *)\n+\t\t\t\t\t\t\t      &qp->cq.cqes[idx];\n+\tvolatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *)\n+\t\t\t\t\t\t\t\t    qp->sq.wqes;\n+\tvolatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;\n+\n+\top->status = RTE_COMP_OP_STATUS_ERROR;\n+\top->consumed = 0;\n+\top->produced = 0;\n+\top->output_chksum = 0;\n+\top->debug_status = rte_be_to_cpu_32(opaq[idx].syndrom) |\n+\t\t\t      
((uint64_t)rte_be_to_cpu_32(cqe->syndrome) << 32);\n+\tmlx5_compress_dump_err_objs((volatile uint32_t *)cqe,\n+\t\t\t\t (volatile uint32_t *)&wqes[idx],\n+\t\t\t\t (volatile uint32_t *)&opaq[idx]);\n+}\n+\n+static uint16_t\n+mlx5_compress_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,\n+\t\t\t    uint16_t nb_ops)\n+{\n+\tstruct mlx5_compress_qp *qp = queue_pair;\n+\tvolatile struct mlx5_compress_xform *restrict xform;\n+\tvolatile struct mlx5_cqe *restrict cqe;\n+\tvolatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;\n+\tstruct rte_comp_op *restrict op;\n+\tconst unsigned int cq_size = qp->entries_n;\n+\tconst unsigned int mask = cq_size - 1;\n+\tuint32_t idx;\n+\tuint32_t next_idx = qp->ci & mask;\n+\tconst uint16_t max = RTE_MIN((uint16_t)(qp->pi - qp->ci), nb_ops);\n+\tuint16_t i = 0;\n+\tint ret;\n+\n+\tif (unlikely(max == 0))\n+\t\treturn 0;\n+\tdo {\n+\t\tidx = next_idx;\n+\t\tnext_idx = (qp->ci + 1) & mask;\n+\t\trte_prefetch0(&qp->cq.cqes[next_idx]);\n+\t\trte_prefetch0(qp->ops[next_idx]);\n+\t\top = qp->ops[idx];\n+\t\tcqe = &qp->cq.cqes[idx];\n+\t\tret = check_cqe(cqe, cq_size, qp->ci);\n+\t\t/*\n+\t\t * Be sure owner read is done before any other cookie field or\n+\t\t * opaque field.\n+\t\t */\n+\t\trte_io_rmb();\n+\t\tif (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {\n+\t\t\tif (likely(ret == MLX5_CQE_STATUS_HW_OWN))\n+\t\t\t\tbreak;\n+\t\t\tmlx5_compress_cqe_err_handle(qp, op);\n+\t\t} else {\n+\t\t\txform = op->private_xform;\n+\t\t\top->status = RTE_COMP_OP_STATUS_SUCCESS;\n+\t\t\top->consumed = op->src.length;\n+\t\t\top->produced = rte_be_to_cpu_32(cqe->byte_cnt);\n+\t\t\tMLX5_ASSERT(cqe->byte_cnt ==\n+\t\t\t\t    qp->opaque_buf[idx].scattered_length);\n+\t\t\tswitch (xform->csum_type) {\n+\t\t\tcase RTE_COMP_CHECKSUM_CRC32:\n+\t\t\t\top->output_chksum = (uint64_t)rte_be_to_cpu_32\n+\t\t\t\t\t\t    (opaq[idx].crc32);\n+\t\t\t\tbreak;\n+\t\t\tcase RTE_COMP_CHECKSUM_ADLER32:\n+\t\t\t\top->output_chksum = 
(uint64_t)rte_be_to_cpu_32\n+\t\t\t\t\t    (opaq[idx].adler32) << 32;\n+\t\t\t\tbreak;\n+\t\t\tcase RTE_COMP_CHECKSUM_CRC32_ADLER32:\n+\t\t\t\top->output_chksum = (uint64_t)rte_be_to_cpu_32\n+\t\t\t\t\t\t\t     (opaq[idx].crc32) |\n+\t\t\t\t\t\t     ((uint64_t)rte_be_to_cpu_32\n+\t\t\t\t\t\t     (opaq[idx].adler32) << 32);\n+\t\t\t\tbreak;\n+\t\t\tdefault:\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\t\tops[i++] = op;\n+\t\tqp->ci++;\n+\t} while (i < max);\n+\tif (likely(i != 0)) {\n+\t\trte_io_wmb();\n+\t\tqp->cq.db_rec[0] = rte_cpu_to_be_32(qp->ci);\n+\t}\n+\treturn i;\n+}\n+\n static struct ibv_device *\n mlx5_compress_get_ib_device_match(struct rte_pci_addr *addr)\n {\n@@ -520,8 +731,8 @@ struct mlx5_compress_qp {\n \tDRV_LOG(INFO,\n \t\t\"Compress device %s was created successfully.\", ibv->name);\n \tcdev->dev_ops = &mlx5_compress_ops;\n-\tcdev->dequeue_burst = NULL;\n-\tcdev->enqueue_burst = NULL;\n+\tcdev->dequeue_burst = mlx5_compress_dequeue_burst;\n+\tcdev->enqueue_burst = mlx5_compress_enqueue_burst;\n \tcdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;\n \tpriv = cdev->data->dev_private;\n \tpriv->ctx = ctx;\n",
    "prefixes": [
        "v3",
        "08/11"
    ]
}