get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing its full set of writable fields.
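
Either endpoint can be driven with any HTTP client. Below is a minimal sketch using Python's requests library (an assumption; no client is prescribed by this page). The token value is a hypothetical placeholder, and the exact set of writable fields ("state", "archived", "delegate", ...) depends on your permissions: write access requires a Patchwork API token with maintainer rights on the project.

import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 36472
TOKEN = "<your-api-token>"  # hypothetical placeholder; generate a real token in your Patchwork profile

# GET: show a patch -- no authentication needed for a public project.
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH: partial update -- send only the fields to change.
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {TOKEN}"},  # token authentication, per the Patchwork API docs
    json={"state": "superseded", "archived": True},
)
resp.raise_for_status()

The captured exchange below shows the result of such a GET request.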

GET /api/patches/36472/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 36472,
    "url": "https://patches.dpdk.org/api/patches/36472/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/ce70a4a8a8073924f8b4545ac535940f6b2e1407.1521964379.git.jianjay.zhou@huawei.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<ce70a4a8a8073924f8b4545ac535940f6b2e1407.1521964379.git.jianjay.zhou@huawei.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/ce70a4a8a8073924f8b4545ac535940f6b2e1407.1521964379.git.jianjay.zhou@huawei.com",
    "date": "2018-03-25T08:33:35",
    "name": "[dpdk-dev,v3,3/7] cryptodev/virtio: core code of crypto devices",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a1626efe105c99d1e2f91fd03a72a6a115b88e34",
    "submitter": {
        "id": 813,
        "url": "https://patches.dpdk.org/api/people/813/?format=api",
        "name": "Zhoujian (jay)",
        "email": "jianjay.zhou@huawei.com"
    },
    "delegate": {
        "id": 22,
        "url": "https://patches.dpdk.org/api/users/22/?format=api",
        "username": "pdelarag",
        "first_name": "Pablo",
        "last_name": "de Lara Guarch",
        "email": "pablo.de.lara.guarch@intel.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/ce70a4a8a8073924f8b4545ac535940f6b2e1407.1521964379.git.jianjay.zhou@huawei.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/36472/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/36472/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 1057FAAA4;\n\tSun, 25 Mar 2018 10:36:01 +0200 (CEST)",
            "from huawei.com (unknown [45.249.212.32])\n\tby dpdk.org (Postfix) with ESMTP id 5B1007CC2\n\tfor <dev@dpdk.org>; Sun, 25 Mar 2018 10:35:56 +0200 (CEST)",
            "from DGGEMS403-HUB.china.huawei.com (unknown [172.30.72.59])\n\tby Forcepoint Email with ESMTP id EBF2A47AA230;\n\tSun, 25 Mar 2018 16:35:52 +0800 (CST)",
            "from localhost (10.177.19.14) by DGGEMS403-HUB.china.huawei.com\n\t(10.3.19.203) with Microsoft SMTP Server id 14.3.361.1;\n\tSun, 25 Mar 2018 16:35:46 +0800"
        ],
        "From": "Jay Zhou <jianjay.zhou@huawei.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<pablo.de.lara.guarch@intel.com>, <roy.fan.zhang@intel.com>,\n\t<thomas@monjalon.net>, <arei.gonglei@huawei.com>, <xin.zeng@intel.com>,\n\t<weidong.huang@huawei.com>, <wangxinxin.wang@huawei.com>,\n\t<longpeng2@huawei.com>, <jianjay.zhou@huawei.com>",
        "Date": "Sun, 25 Mar 2018 16:33:35 +0800",
        "Message-ID": "<ce70a4a8a8073924f8b4545ac535940f6b2e1407.1521964379.git.jianjay.zhou@huawei.com>",
        "X-Mailer": "git-send-email 2.6.1.windows.1",
        "In-Reply-To": "<cover.1521964379.git.jianjay.zhou@huawei.com>",
        "References": "<cover.1521964379.git.jianjay.zhou@huawei.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.177.19.14]",
        "X-CFilter-Loop": "Reflected",
        "Subject": "[dpdk-dev] [PATCH v3 3/7] cryptodev/virtio: core code of crypto\n\tdevices",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The idea comes from QAT and virtio-net devices.\n\nSigned-off-by: Jay Zhou <jianjay.zhou@huawei.com>\n---\n drivers/crypto/virtio/virtio_crypto_capabilities.h |   51 +\n drivers/crypto/virtio/virtio_cryptodev.c           | 1553 ++++++++++++++++++++\n drivers/crypto/virtio/virtio_cryptodev.h           |   66 +\n drivers/crypto/virtio/virtio_rxtx.c                |  540 +++++++\n 4 files changed, 2210 insertions(+)\n create mode 100644 drivers/crypto/virtio/virtio_crypto_capabilities.h\n create mode 100644 drivers/crypto/virtio/virtio_cryptodev.c\n create mode 100644 drivers/crypto/virtio/virtio_cryptodev.h\n create mode 100644 drivers/crypto/virtio/virtio_rxtx.c",
    "diff": "diff --git a/drivers/crypto/virtio/virtio_crypto_capabilities.h b/drivers/crypto/virtio/virtio_crypto_capabilities.h\nnew file mode 100644\nindex 0000000..0d8dfac\n--- /dev/null\n+++ b/drivers/crypto/virtio/virtio_crypto_capabilities.h\n@@ -0,0 +1,51 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.\n+ */\n+\n+#ifndef _VIRTIO_CRYPTO_CAPABILITIES_H_\n+#define _VIRTIO_CRYPTO_CAPABILITIES_H_\n+\n+#define VIRTIO_SYM_CAPABILITIES\t\t\t\t\t\t\\\n+\t{\t/* SHA1 HMAC */\t\t\t\t\t\t\\\n+\t\t.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,\t\t\t\\\n+\t\t{.sym = {\t\t\t\t\t\t\\\n+\t\t\t.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,\t\\\n+\t\t\t{.auth = {\t\t\t\t\t\\\n+\t\t\t\t.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,\t\\\n+\t\t\t\t.block_size = 64,\t\t\t\\\n+\t\t\t\t.key_size = {\t\t\t\t\\\n+\t\t\t\t\t.min = 1,\t\t\t\\\n+\t\t\t\t\t.max = 64,\t\t\t\\\n+\t\t\t\t\t.increment = 1\t\t\t\\\n+\t\t\t\t},\t\t\t\t\t\\\n+\t\t\t\t.digest_size = {\t\t\t\\\n+\t\t\t\t\t.min = 1,\t\t\t\\\n+\t\t\t\t\t.max = 20,\t\t\t\\\n+\t\t\t\t\t.increment = 1\t\t\t\\\n+\t\t\t\t},\t\t\t\t\t\\\n+\t\t\t\t.iv_size = { 0 }\t\t\t\\\n+\t\t\t}, }\t\t\t\t\t\t\\\n+\t\t}, }\t\t\t\t\t\t\t\\\n+\t},\t\t\t\t\t\t\t\t\\\n+\t{\t/* AES CBC */\t\t\t\t\t\t\\\n+\t\t.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,\t\t\t\\\n+\t\t{.sym = {\t\t\t\t\t\t\\\n+\t\t\t.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,\t\\\n+\t\t\t{.cipher = {\t\t\t\t\t\\\n+\t\t\t\t.algo = RTE_CRYPTO_CIPHER_AES_CBC,\t\\\n+\t\t\t\t.block_size = 16,\t\t\t\\\n+\t\t\t\t.key_size = {\t\t\t\t\\\n+\t\t\t\t\t.min = 16,\t\t\t\\\n+\t\t\t\t\t.max = 32,\t\t\t\\\n+\t\t\t\t\t.increment = 8\t\t\t\\\n+\t\t\t\t},\t\t\t\t\t\\\n+\t\t\t\t.iv_size = {\t\t\t\t\\\n+\t\t\t\t\t.min = 16,\t\t\t\\\n+\t\t\t\t\t.max = 16,\t\t\t\\\n+\t\t\t\t\t.increment = 0\t\t\t\\\n+\t\t\t\t}\t\t\t\t\t\\\n+\t\t\t}, }\t\t\t\t\t\t\\\n+\t\t}, }\t\t\t\t\t\t\t\\\n+\t}\n+\n+#endif /* _VIRTIO_CRYPTO_CAPABILITIES_H_ */\ndiff --git a/drivers/crypto/virtio/virtio_cryptodev.c b/drivers/crypto/virtio/virtio_cryptodev.c\nnew file mode 100644\nindex 0000000..8affbef\n--- /dev/null\n+++ b/drivers/crypto/virtio/virtio_cryptodev.c\n@@ -0,0 +1,1553 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.\n+ */\n+\n+#include <stdint.h>\n+#include <string.h>\n+#include <stdio.h>\n+#include <stdbool.h>\n+#include <errno.h>\n+#include <unistd.h>\n+#ifdef RTE_EXEC_ENV_LINUXAPP\n+#include <dirent.h>\n+#include <fcntl.h>\n+#endif\n+\n+#include <rte_cryptodev.h>\n+#include <rte_cryptodev_pmd.h>\n+#include <rte_memcpy.h>\n+#include <rte_string_fns.h>\n+#include <rte_memzone.h>\n+#include <rte_malloc.h>\n+#include <rte_atomic.h>\n+#include <rte_branch_prediction.h>\n+#include <rte_pci.h>\n+#include <rte_common.h>\n+#include <rte_errno.h>\n+\n+#include <rte_memory.h>\n+#include <rte_eal.h>\n+#include <rte_dev.h>\n+#include <rte_log.h>\n+\n+#include \"virtio_cryptodev.h\"\n+#include \"virtqueue.h\"\n+#include \"virtio_crypto_algs.h\"\n+#include \"virtio_crypto_capabilities.h\"\n+\n+static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,\n+\t\tstruct rte_cryptodev_config *config);\n+static int virtio_crypto_dev_start(struct rte_cryptodev *dev);\n+static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);\n+static int virtio_crypto_dev_close(struct rte_cryptodev *dev);\n+static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,\n+\t\tstruct rte_cryptodev_info *dev_info);\n+static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,\n+\t\tstruct rte_cryptodev_stats 
*stats);\n+static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);\n+static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,\n+\t\tuint16_t queue_pair_id,\n+\t\tconst struct rte_cryptodev_qp_conf *qp_conf,\n+\t\tint socket_id,\n+\t\tstruct rte_mempool *session_pool);\n+static int virtio_crypto_qp_release(struct rte_cryptodev *dev,\n+\t\tuint16_t queue_pair_id);\n+static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);\n+static unsigned int virtio_crypto_sym_get_session_private_size(\n+\t\tstruct rte_cryptodev *dev);\n+static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,\n+\t\tstruct rte_cryptodev_sym_session *sess);\n+static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,\n+\t\tstruct rte_crypto_sym_xform *xform,\n+\t\tstruct rte_cryptodev_sym_session *session,\n+\t\tstruct rte_mempool *mp);\n+\n+/*\n+ * The set of PCI devices this driver supports\n+ */\n+static const struct rte_pci_id pci_id_virtio_crypto_map[] = {\n+\t{ RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,\n+\t\t\t\t\t\tVIRTIO_CRYPTO_PCI_DEVICEID) },\n+\t{ .vendor_id = 0, /* sentinel */ },\n+};\n+\n+static const struct rte_cryptodev_capabilities virtio_capabilities[] = {\n+\tVIRTIO_SYM_CAPABILITIES,\n+\tRTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()\n+};\n+\n+uint8_t cryptodev_virtio_driver_id;\n+\n+#define NUM_ENTRY_SYM_CREATE_SESSION 4\n+\n+static int\n+virtio_crypto_send_command(struct virtqueue *vq,\n+\t\tstruct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,\n+\t\tuint8_t *auth_key, struct virtio_crypto_session *session)\n+{\n+\tuint8_t idx = 0;\n+\tuint8_t needed = 1;\n+\tuint32_t head = 0;\n+\tuint32_t len_cipher_key = 0;\n+\tuint32_t len_auth_key = 0;\n+\tuint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);\n+\tuint32_t len_session_input = sizeof(struct virtio_crypto_session_input);\n+\tuint32_t len_total = 0;\n+\tuint32_t input_offset = 0;\n+\tvoid *virt_addr_started = NULL;\n+\tphys_addr_t phys_addr_started;\n+\tstruct vring_desc *desc;\n+\tuint32_t desc_offset;\n+\tstruct virtio_crypto_session_input *input;\n+\tint ret;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (session == NULL) {\n+\t\tPMD_SESSION_LOG(ERR, \"session is NULL.\");\n+\t\treturn -EINVAL;\n+\t}\n+\t/* cipher only is supported, it is available if auth_key is NULL */\n+\tif (!cipher_key) {\n+\t\tPMD_SESSION_LOG(ERR, \"cipher key is NULL.\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\thead = vq->vq_desc_head_idx;\n+\tPMD_INIT_LOG(DEBUG, \"vq->vq_desc_head_idx = %d, vq = %p\", head, vq);\n+\n+\tif (vq->vq_free_cnt < needed) {\n+\t\tPMD_SESSION_LOG(ERR, \"Not enough entry\");\n+\t\treturn -ENOSPC;\n+\t}\n+\n+\t/* calculate the length of cipher key */\n+\tif (cipher_key) {\n+\t\tswitch (ctrl->u.sym_create_session.op_type) {\n+\t\tcase VIRTIO_CRYPTO_SYM_OP_CIPHER:\n+\t\t\tlen_cipher_key\n+\t\t\t\t= ctrl->u.sym_create_session.u.cipher\n+\t\t\t\t\t\t\t.para.keylen;\n+\t\t\tbreak;\n+\t\tcase VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:\n+\t\t\tlen_cipher_key\n+\t\t\t\t= ctrl->u.sym_create_session.u.chain\n+\t\t\t\t\t.para.cipher_param.keylen;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tPMD_SESSION_LOG(ERR, \"invalid op type\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\t/* calculate the length of auth key */\n+\tif (auth_key) {\n+\t\tlen_auth_key =\n+\t\t\tctrl->u.sym_create_session.u.chain.para.u.mac_param\n+\t\t\t\t.auth_key_len;\n+\t}\n+\n+\t/*\n+\t * malloc memory to store indirect vring_desc entries, including\n+\t * ctrl request, cipher key, auth key, session input and desc vring\n+\t 
*/\n+\tdesc_offset = len_ctrl_req + len_cipher_key + len_auth_key\n+\t\t+ len_session_input;\n+\tvirt_addr_started = rte_malloc(NULL,\n+\t\tdesc_offset + NUM_ENTRY_SYM_CREATE_SESSION\n+\t\t\t* sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);\n+\tif (virt_addr_started == NULL) {\n+\t\tPMD_SESSION_LOG(ERR, \"not enough heap memory\");\n+\t\treturn -ENOSPC;\n+\t}\n+\tphys_addr_started = rte_malloc_virt2iova(virt_addr_started);\n+\n+\t/* address to store indirect vring desc entries */\n+\tdesc = (struct vring_desc *)\n+\t\t((uint8_t *)virt_addr_started + desc_offset);\n+\n+\t/*  ctrl req part */\n+\tmemcpy(virt_addr_started, ctrl, len_ctrl_req);\n+\tdesc[idx].addr = phys_addr_started;\n+\tdesc[idx].len = len_ctrl_req;\n+\tdesc[idx].flags = VRING_DESC_F_NEXT;\n+\tdesc[idx].next = idx + 1;\n+\tidx++;\n+\tlen_total += len_ctrl_req;\n+\tinput_offset += len_ctrl_req;\n+\n+\t/* cipher key part */\n+\tif (len_cipher_key > 0) {\n+\t\tmemcpy((uint8_t *)virt_addr_started + len_total,\n+\t\t\tcipher_key, len_cipher_key);\n+\n+\t\tdesc[idx].addr = phys_addr_started + len_total;\n+\t\tdesc[idx].len = len_cipher_key;\n+\t\tdesc[idx].flags = VRING_DESC_F_NEXT;\n+\t\tdesc[idx].next = idx + 1;\n+\t\tidx++;\n+\t\tlen_total += len_cipher_key;\n+\t\tinput_offset += len_cipher_key;\n+\t}\n+\n+\t/* auth key part */\n+\tif (len_auth_key > 0) {\n+\t\tmemcpy((uint8_t *)virt_addr_started + len_total,\n+\t\t\tauth_key, len_auth_key);\n+\n+\t\tdesc[idx].addr = phys_addr_started + len_total;\n+\t\tdesc[idx].len = len_auth_key;\n+\t\tdesc[idx].flags = VRING_DESC_F_NEXT;\n+\t\tdesc[idx].next = idx + 1;\n+\t\tidx++;\n+\t\tlen_total += len_auth_key;\n+\t\tinput_offset += len_auth_key;\n+\t}\n+\n+\t/* input part */\n+\tinput = (struct virtio_crypto_session_input *)\n+\t\t((uint8_t *)virt_addr_started + input_offset);\n+\tinput->status = VIRTIO_CRYPTO_ERR;\n+\tinput->session_id = ~0ULL;\n+\tdesc[idx].addr = phys_addr_started + len_total;\n+\tdesc[idx].len = len_session_input;\n+\tdesc[idx].flags = VRING_DESC_F_WRITE;\n+\tidx++;\n+\n+\t/* use a single desc entry */\n+\tvq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;\n+\tvq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);\n+\tvq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;\n+\tvq->vq_free_cnt--;\n+\n+\tvq->vq_desc_head_idx = vq->vq_ring.desc[head].next;\n+\n+\tvq_update_avail_ring(vq, head);\n+\tvq_update_avail_idx(vq);\n+\n+\tPMD_INIT_LOG(DEBUG, \"vq->vq_queue_index = %d\", vq->vq_queue_index);\n+\n+\tvirtqueue_notify(vq);\n+\n+\trte_rmb();\n+\twhile (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {\n+\t\trte_rmb();\n+\t\tusleep(100);\n+\t}\n+\n+\twhile (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {\n+\t\tuint32_t idx, desc_idx, used_idx;\n+\t\tstruct vring_used_elem *uep;\n+\n+\t\tused_idx = (uint32_t)(vq->vq_used_cons_idx\n+\t\t\t\t& (vq->vq_nentries - 1));\n+\t\tuep = &vq->vq_ring.used->ring[used_idx];\n+\t\tidx = (uint32_t) uep->id;\n+\t\tdesc_idx = idx;\n+\n+\t\twhile (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {\n+\t\t\tdesc_idx = vq->vq_ring.desc[desc_idx].next;\n+\t\t\tvq->vq_free_cnt++;\n+\t\t}\n+\n+\t\tvq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;\n+\t\tvq->vq_desc_head_idx = idx;\n+\n+\t\tvq->vq_used_cons_idx++;\n+\t\tvq->vq_free_cnt++;\n+\t}\n+\n+\tPMD_INIT_LOG(DEBUG, \"vq->vq_free_cnt=%d\\nvq->vq_desc_head_idx=%d\",\n+\t\t\tvq->vq_free_cnt, vq->vq_desc_head_idx);\n+\n+\t/* get the result */\n+\tif (input->status != VIRTIO_CRYPTO_OK) {\n+\t\tPMD_SESSION_LOG(ERR, \"Something wrong on backend! 
\"\n+\t\t\t\t\"status=%\"PRIu32\", session_id=%\"PRIu64\"\",\n+\t\t\t\tinput->status, input->session_id);\n+\t\trte_free(virt_addr_started);\n+\t\tret = -1;\n+\t} else {\n+\t\tsession->session_id = input->session_id;\n+\n+\t\tPMD_SESSION_LOG(INFO, \"Create session successfully, \"\n+\t\t\t\t\"session_id=%\"PRIu64\"\", input->session_id);\n+\t\trte_free(virt_addr_started);\n+\t\tret = 0;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+void virtio_crypto_queue_release(struct virtqueue *vq)\n+{\n+\tstruct virtio_crypto_hw *hw;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (vq) {\n+\t\thw = vq->hw;\n+\t\t/* Select and deactivate the queue */\n+\t\tVTPCI_OPS(hw)->del_queue(hw, vq);\n+\n+\t\trte_memzone_free(vq->mz);\n+\t\trte_mempool_free(vq->mpool);\n+\t\trte_free(vq);\n+\t}\n+}\n+\n+#define MPOOL_MAX_NAME_SZ 32\n+\n+int virtio_crypto_queue_setup(struct rte_cryptodev *dev,\n+\t\tint queue_type,\n+\t\tuint16_t vtpci_queue_idx,\n+\t\tuint16_t nb_desc,\n+\t\tint socket_id,\n+\t\tstruct virtqueue **pvq)\n+{\n+\tchar vq_name[VIRTQUEUE_MAX_NAME_SZ];\n+\tchar mpool_name[MPOOL_MAX_NAME_SZ];\n+\tconst struct rte_memzone *mz;\n+\tunsigned int vq_size, size;\n+\tstruct virtio_crypto_hw *hw = dev->data->dev_private;\n+\tstruct virtqueue *vq = NULL;\n+\tuint32_t i = 0;\n+\tuint32_t j;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tPMD_INIT_LOG(DEBUG, \"setting up queue: %u\", vtpci_queue_idx);\n+\n+\t/*\n+\t * Read the virtqueue size from the Queue Size field\n+\t * Always power of 2 and if 0 virtqueue does not exist\n+\t */\n+\tvq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);\n+\tif (vq_size == 0) {\n+\t\tPMD_INIT_LOG(ERR, \"virtqueue does not exist\");\n+\t\treturn -EINVAL;\n+\t}\n+\tPMD_INIT_LOG(DEBUG, \"vq_size: %u\", vq_size);\n+\n+\tif (!rte_is_power_of_2(vq_size)) {\n+\t\tPMD_INIT_LOG(ERR, \"virtqueue size is not powerof 2\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (queue_type == VTCRYPTO_DATAQ) {\n+\t\tsnprintf(vq_name, sizeof(vq_name), \"dev%d_dataqueue%d\",\n+\t\t\t\tdev->data->dev_id, vtpci_queue_idx);\n+\t\tsnprintf(mpool_name, sizeof(mpool_name), \"dev%d_dataqueue%d_mpool\",\n+\t\t\t\tdev->data->dev_id, vtpci_queue_idx);\n+\t} else if (queue_type == VTCRYPTO_CTRLQ) {\n+\t\tsnprintf(vq_name, sizeof(vq_name), \"dev%d_controlqueue\",\n+\t\t\t\tdev->data->dev_id);\n+\t\tsnprintf(mpool_name, sizeof(mpool_name), \"dev%d_controlqueue_mpool\",\n+\t\t\t\tdev->data->dev_id);\n+\t}\n+\tsize = RTE_ALIGN_CEIL(sizeof(*vq) +\n+\t\t\t\tvq_size * sizeof(struct vq_desc_extra),\n+\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\tvq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,\n+\t\t\t\tsocket_id);\n+\tif (vq == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Can not allocate virtqueue\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tif (queue_type == VTCRYPTO_DATAQ) {\n+\t\t/* pre-allocate a mempool and use it in the data plane to\n+\t\t * improve performance\n+\t\t */\n+\t\tvq->mpool = rte_mempool_lookup(mpool_name);\n+\t\tif (vq->mpool == NULL)\n+\t\t\tvq->mpool = rte_mempool_create(mpool_name,\n+\t\t\t\t\tvq_size,\n+\t\t\t\t\tsizeof(struct virtio_crypto_op_cookie),\n+\t\t\t\t\tRTE_CACHE_LINE_SIZE, 0,\n+\t\t\t\t\tNULL, NULL, NULL, NULL, socket_id,\n+\t\t\t\t\t0);\n+\t\tif (!vq->mpool) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Virtio Crypto PMD Cannot create\"\n+\t\t\t\t\t\" mempool\");\n+\t\t\tgoto mpool_create_err;\n+\t\t}\n+\t\tfor (i = 0; i < vq_size; i++) {\n+\t\t\tvq->vq_descx[i].cookie =\n+\t\t\t\trte_zmalloc(\"crypto PMD op cookie pointer\",\n+\t\t\t\t\tsizeof(struct virtio_crypto_op_cookie),\n+\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\t\tif (vq->vq_descx[i].cookie 
== NULL) {\n+\t\t\t\tPMD_DRV_LOG(ERR, \"Failed to alloc mem for cookie\");\n+\t\t\t\tgoto cookie_alloc_err;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tvq->hw = hw;\n+\tvq->dev_id = dev->data->dev_id;\n+\tvq->vq_queue_index = vtpci_queue_idx;\n+\tvq->vq_nentries = vq_size;\n+\n+\t/*\n+\t * Using part of the vring entries is permitted, but the maximum\n+\t * is vq_size\n+\t */\n+\tif (nb_desc == 0 || nb_desc > vq_size)\n+\t\tnb_desc = vq_size;\n+\tvq->vq_free_cnt = nb_desc;\n+\n+\t/*\n+\t * Reserve a memzone for vring elements\n+\t */\n+\tsize = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);\n+\tvq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);\n+\tPMD_INIT_LOG(DEBUG, \"%s vring_size: %d, rounded_vring_size: %d\",\n+\t\t\t(queue_type == VTCRYPTO_DATAQ) ? \"dataq\" : \"ctrlq\",\n+\t\t\tsize, vq->vq_ring_size);\n+\n+\tmz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,\n+\t\t\tsocket_id, 0, VIRTIO_PCI_VRING_ALIGN);\n+\tif (mz == NULL) {\n+\t\tif (rte_errno == EEXIST)\n+\t\t\tmz = rte_memzone_lookup(vq_name);\n+\t\tif (mz == NULL) {\n+\t\t\tPMD_INIT_LOG(ERR, \"not enough memory\");\n+\t\t\tgoto mz_reserve_err;\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,\n+\t * and only accepts 32 bit page frame number.\n+\t * Check if the allocated physical memory exceeds 16TB.\n+\t */\n+\tif ((mz->phys_addr + vq->vq_ring_size - 1)\n+\t\t\t\t>> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {\n+\t\tPMD_INIT_LOG(ERR, \"vring address shouldn't be above 16TB!\");\n+\t\tgoto vring_addr_err;\n+\t}\n+\n+\tmemset(mz->addr, 0, sizeof(mz->len));\n+\tvq->mz = mz;\n+\tvq->vq_ring_mem = mz->phys_addr;\n+\tvq->vq_ring_virt_mem = mz->addr;\n+\tPMD_INIT_LOG(DEBUG, \"vq->vq_ring_mem(physical): 0x%\"PRIx64,\n+\t\t\t\t\t(uint64_t)mz->phys_addr);\n+\tPMD_INIT_LOG(DEBUG, \"vq->vq_ring_virt_mem: 0x%\"PRIx64,\n+\t\t\t\t\t(uint64_t)(uintptr_t)mz->addr);\n+\n+\t*pvq = vq;\n+\n+\treturn 0;\n+\n+vring_addr_err:\n+\trte_memzone_free(mz);\n+mz_reserve_err:\n+cookie_alloc_err:\n+\trte_mempool_free(vq->mpool);\n+\tif (i != 0) {\n+\t\tfor (j = 0; j < i; j++)\n+\t\t\trte_free(vq->vq_descx[j].cookie);\n+\t}\n+mpool_create_err:\n+\trte_free(vq);\n+\treturn -ENOMEM;\n+}\n+\n+static int\n+virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)\n+{\n+\tint ret;\n+\tstruct virtqueue *vq;\n+\tstruct virtio_crypto_hw *hw = dev->data->dev_private;\n+\n+\t/* if virtio device has started, do not touch the virtqueues */\n+\tif (dev->data->dev_started)\n+\t\treturn 0;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,\n+\t\t\t0, SOCKET_ID_ANY, &vq);\n+\tif (ret < 0) {\n+\t\tPMD_INIT_LOG(ERR, \"control vq initialization failed\");\n+\t\treturn ret;\n+\t}\n+\n+\thw->cvq = vq;\n+\n+\treturn 0;\n+}\n+\n+static void\n+virtio_crypto_free_queues(struct rte_cryptodev *dev)\n+{\n+\tunsigned int i;\n+\tstruct virtio_crypto_hw *hw = dev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* control queue release */\n+\tvirtio_crypto_queue_release(hw->cvq);\n+\n+\t/* data queue release */\n+\tfor (i = 0; i < hw->max_dataqueues; i++)\n+\t\tvirtio_crypto_queue_release(dev->data->queue_pairs[i]);\n+}\n+\n+static int\n+virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)\n+{\n+\treturn 0;\n+}\n+\n+/*\n+ * dev_ops for virtio, bare necessities for basic operation\n+ */\n+static struct rte_cryptodev_ops virtio_crypto_dev_ops = {\n+\t/* Device related operations */\n+\t.dev_configure\t\t\t = virtio_crypto_dev_configure,\n+\t.dev_start\t\t\t     = 
virtio_crypto_dev_start,\n+\t.dev_stop\t\t\t     = virtio_crypto_dev_stop,\n+\t.dev_close\t\t\t     = virtio_crypto_dev_close,\n+\t.dev_infos_get\t\t\t = virtio_crypto_dev_info_get,\n+\n+\t.stats_get\t\t\t     = virtio_crypto_dev_stats_get,\n+\t.stats_reset\t\t\t = virtio_crypto_dev_stats_reset,\n+\n+\t.queue_pair_setup                = virtio_crypto_qp_setup,\n+\t.queue_pair_release              = virtio_crypto_qp_release,\n+\t.queue_pair_start                = NULL,\n+\t.queue_pair_stop                 = NULL,\n+\t.queue_pair_count                = NULL,\n+\n+\t/* Crypto related operations */\n+\t.session_get_size\t= virtio_crypto_sym_get_session_private_size,\n+\t.session_configure\t= virtio_crypto_sym_configure_session,\n+\t.session_clear\t\t= virtio_crypto_sym_clear_session,\n+\t.qp_attach_session = NULL,\n+\t.qp_detach_session = NULL\n+};\n+\n+static void\n+virtio_crypto_update_stats(struct rte_cryptodev *dev,\n+\t\tstruct rte_cryptodev_stats *stats)\n+{\n+\tunsigned int i;\n+\tstruct virtio_crypto_hw *hw = dev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (stats == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"invalid pointer\");\n+\t\treturn;\n+\t}\n+\n+\tfor (i = 0; i < hw->max_dataqueues; i++) {\n+\t\tconst struct virtqueue *data_queue\n+\t\t\t= dev->data->queue_pairs[i];\n+\t\tif (data_queue == NULL)\n+\t\t\tcontinue;\n+\n+\t\tstats->enqueued_count += data_queue->packets_sent_total;\n+\t\tstats->enqueue_err_count += data_queue->packets_sent_failed;\n+\n+\t\tstats->dequeued_count += data_queue->packets_received_total;\n+\t\tstats->dequeue_err_count\n+\t\t\t+= data_queue->packets_received_failed;\n+\t}\n+}\n+\n+static void\n+virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,\n+\t\tstruct rte_cryptodev_stats *stats)\n+{\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tvirtio_crypto_update_stats(dev, stats);\n+}\n+\n+static void\n+virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)\n+{\n+\tunsigned int i;\n+\tstruct virtio_crypto_hw *hw = dev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfor (i = 0; i < hw->max_dataqueues; i++) {\n+\t\tstruct virtqueue *data_queue = dev->data->queue_pairs[i];\n+\t\tif (data_queue == NULL)\n+\t\t\tcontinue;\n+\n+\t\tdata_queue->packets_sent_total = 0;\n+\t\tdata_queue->packets_sent_failed = 0;\n+\n+\t\tdata_queue->packets_received_total = 0;\n+\t\tdata_queue->packets_received_failed = 0;\n+\t}\n+}\n+\n+static int\n+virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,\n+\t\tconst struct rte_cryptodev_qp_conf *qp_conf,\n+\t\tint socket_id,\n+\t\tstruct rte_mempool *session_pool __rte_unused)\n+{\n+\tint ret;\n+\tstruct virtqueue *vq;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* if virtio dev is started, do not touch the virtqueues */\n+\tif (dev->data->dev_started)\n+\t\treturn 0;\n+\n+\tret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,\n+\t\t\tqp_conf->nb_descriptors, socket_id, &vq);\n+\tif (ret < 0) {\n+\t\tPMD_INIT_LOG(ERR,\n+\t\t\t\"virtio crypto data queue initialization failed\\n\");\n+\t\treturn ret;\n+\t}\n+\n+\tdev->data->queue_pairs[queue_pair_id] = vq;\n+\n+\treturn 0;\n+}\n+\n+static int\n+virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)\n+{\n+\tstruct virtqueue *vq\n+\t\t= (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (vq == NULL) {\n+\t\tPMD_DRV_LOG(DEBUG, \"vq already freed\");\n+\t\treturn 0;\n+\t}\n+\n+\tvirtio_crypto_queue_release(vq);\n+\treturn 0;\n+}\n+\n+static int\n+virtio_negotiate_features(struct 
virtio_crypto_hw *hw, uint64_t req_features)\n+{\n+\tuint64_t host_features;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* Prepare guest_features: feature that driver wants to support */\n+\tPMD_INIT_LOG(DEBUG, \"guest_features before negotiate = %\" PRIx64,\n+\t\treq_features);\n+\n+\t/* Read device(host) feature bits */\n+\thost_features = VTPCI_OPS(hw)->get_features(hw);\n+\tPMD_INIT_LOG(DEBUG, \"host_features before negotiate = %\" PRIx64,\n+\t\thost_features);\n+\n+\t/*\n+\t * Negotiate features: Subset of device feature bits are written back\n+\t * guest feature bits.\n+\t */\n+\thw->guest_features = req_features;\n+\thw->guest_features = vtpci_cryptodev_negotiate_features(hw,\n+\t\t\t\t\t\t\thost_features);\n+\tPMD_INIT_LOG(DEBUG, \"features after negotiate = %\" PRIx64,\n+\t\thw->guest_features);\n+\n+\tif (hw->modern) {\n+\t\tif (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {\n+\t\t\tPMD_INIT_LOG(ERR,\n+\t\t\t\t\"VIRTIO_F_VERSION_1 features is not enabled.\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\tvtpci_cryptodev_set_status(hw,\n+\t\t\tVIRTIO_CONFIG_STATUS_FEATURES_OK);\n+\t\tif (!(vtpci_cryptodev_get_status(hw) &\n+\t\t\tVIRTIO_CONFIG_STATUS_FEATURES_OK)) {\n+\t\t\tPMD_INIT_LOG(ERR, \"failed to set FEATURES_OK status!\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\thw->req_guest_features = req_features;\n+\n+\treturn 0;\n+}\n+\n+/* reset device and renegotiate features if needed */\n+static int\n+virtio_crypto_init_device(struct rte_cryptodev *cryptodev,\n+\tuint64_t req_features)\n+{\n+\tstruct virtio_crypto_hw *hw = cryptodev->data->dev_private;\n+\tstruct virtio_crypto_config local_config;\n+\tstruct virtio_crypto_config *config = &local_config;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* Reset the device although not necessary at startup */\n+\tvtpci_cryptodev_reset(hw);\n+\n+\t/* Tell the host we've noticed this device. */\n+\tvtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);\n+\n+\t/* Tell the host we've known how to drive the device. 
*/\n+\tvtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);\n+\tif (virtio_negotiate_features(hw, req_features) < 0)\n+\t\treturn -1;\n+\n+\t/* Get status of the device */\n+\tvtpci_read_cryptodev_config(hw,\n+\t\toffsetof(struct virtio_crypto_config, status),\n+\t\t&config->status, sizeof(config->status));\n+\tif (config->status != VIRTIO_CRYPTO_S_HW_READY) {\n+\t\tPMD_DRV_LOG(ERR, \"accelerator hardware is \"\n+\t\t\t\t\"not ready\");\n+\t\treturn -1;\n+\t}\n+\n+\t/* Get number of data queues */\n+\tvtpci_read_cryptodev_config(hw,\n+\t\toffsetof(struct virtio_crypto_config, max_dataqueues),\n+\t\t&config->max_dataqueues,\n+\t\tsizeof(config->max_dataqueues));\n+\thw->max_dataqueues = config->max_dataqueues;\n+\n+\tPMD_INIT_LOG(DEBUG, \"hw->max_dataqueues=%d\",\n+\t\thw->max_dataqueues);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * This function is based on probe() function\n+ * It returns 0 on success.\n+ */\n+static int\n+crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,\n+\t\tstruct rte_cryptodev_pmd_init_params *init_params)\n+{\n+\tstruct rte_cryptodev *cryptodev;\n+\tstruct virtio_crypto_hw *hw;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tcryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,\n+\t\t\t\t\tinit_params);\n+\tif (cryptodev == NULL)\n+\t\treturn -ENODEV;\n+\n+\tcryptodev->driver_id = cryptodev_virtio_driver_id;\n+\tcryptodev->dev_ops = &virtio_crypto_dev_ops;\n+\n+\tcryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;\n+\tcryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;\n+\n+\tcryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |\n+\t\tRTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;\n+\n+\thw = cryptodev->data->dev_private;\n+\thw->dev_id = cryptodev->data->dev_id;\n+\thw->virtio_dev_capabilities = virtio_capabilities;\n+\n+\tPMD_INIT_LOG(DEBUG, \"dev %d vendorID=0x%x deviceID=0x%x\",\n+\t\tcryptodev->data->dev_id, pci_dev->id.vendor_id,\n+\t\tpci_dev->id.device_id);\n+\n+\t/* pci device init */\n+\tif (vtpci_cryptodev_init(pci_dev, hw))\n+\t\treturn -1;\n+\n+\tif (virtio_crypto_init_device(cryptodev,\n+\t\t\tVIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+static int\n+virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)\n+{\n+\tstruct virtio_crypto_hw *hw = cryptodev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (rte_eal_process_type() == RTE_PROC_SECONDARY)\n+\t\treturn -EPERM;\n+\n+\tif (cryptodev->data->dev_started) {\n+\t\tvirtio_crypto_dev_stop(cryptodev);\n+\t\tvirtio_crypto_dev_close(cryptodev);\n+\t}\n+\n+\tcryptodev->dev_ops = NULL;\n+\tcryptodev->enqueue_burst = NULL;\n+\tcryptodev->dequeue_burst = NULL;\n+\n+\t/* release control queue */\n+\tvirtio_crypto_queue_release(hw->cvq);\n+\n+\trte_free(cryptodev->data);\n+\tcryptodev->data = NULL;\n+\n+\tPMD_DRV_LOG(INFO, \"dev_uninit completed\");\n+\n+\treturn 0;\n+}\n+\n+static int\n+virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,\n+\tstruct rte_cryptodev_config *config __rte_unused)\n+{\n+\tstruct virtio_crypto_hw *hw = cryptodev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (virtio_crypto_init_device(cryptodev,\n+\t\t\tVIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)\n+\t\treturn -1;\n+\n+\t/* setup control queue\n+\t * [0, 1, ... 
,(config->max_dataqueues - 1)] are data queues\n+\t * config->max_dataqueues is the control queue\n+\t */\n+\tif (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {\n+\t\tPMD_INIT_LOG(ERR, \"control queue setup error\");\n+\t\treturn -1;\n+\t}\n+\tvirtio_crypto_ctrlq_start(cryptodev);\n+\n+\treturn 0;\n+}\n+\n+static void\n+virtio_crypto_dev_stop(struct rte_cryptodev *dev)\n+{\n+\tstruct virtio_crypto_hw *hw = dev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\tPMD_DRV_LOG(DEBUG, \"virtio_dev_stop\");\n+\n+\tvtpci_cryptodev_reset(hw);\n+\n+\tvirtio_crypto_dev_free_mbufs(dev);\n+\tvirtio_crypto_free_queues(dev);\n+\n+\tdev->data->dev_started = 0;\n+}\n+\n+static int\n+virtio_crypto_dev_start(struct rte_cryptodev *dev)\n+{\n+\tstruct virtio_crypto_hw *hw = dev->data->dev_private;\n+\n+\tif (dev->data->dev_started)\n+\t\treturn 0;\n+\n+\t/* Do final configuration before queue engine starts */\n+\tvirtio_crypto_dataq_start(dev);\n+\tvtpci_cryptodev_reinit_complete(hw);\n+\n+\tdev->data->dev_started = 1;\n+\n+\treturn 0;\n+}\n+\n+static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)\n+{\n+\tuint32_t i;\n+\tstruct virtio_crypto_hw *hw = dev->data->dev_private;\n+\n+\tfor (i = 0; i < hw->max_dataqueues; i++) {\n+\t\tPMD_INIT_LOG(DEBUG, \"Before freeing dataq[%d] used \"\n+\t\t\t\"and unused buf\", i);\n+\t\tVIRTQUEUE_DUMP((struct virtqueue *)\n+\t\t\tdev->data->queue_pairs[i]);\n+\n+\t\tPMD_INIT_LOG(DEBUG, \"queue_pairs[%d]=%p\",\n+\t\t\t\ti, dev->data->queue_pairs[i]);\n+\n+\t\tvirtqueue_detatch_unused(dev->data->queue_pairs[i]);\n+\n+\t\tPMD_INIT_LOG(DEBUG, \"After freeing dataq[%d] used and \"\n+\t\t\t\t\t\"unused buf\", i);\n+\t\tVIRTQUEUE_DUMP(\n+\t\t\t(struct virtqueue *)dev->data->queue_pairs[i]);\n+\t}\n+}\n+\n+static unsigned int virtio_crypto_sym_get_session_private_size(\n+\t\tstruct rte_cryptodev *dev __rte_unused)\n+{\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\treturn RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);\n+}\n+\n+static int virtio_crypto_check_sym_session_paras(\n+\t\tstruct rte_cryptodev *dev)\n+{\n+\tstruct virtio_crypto_hw *hw;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (unlikely(dev == NULL)) {\n+\t\tPMD_SESSION_LOG(ERR, \"dev is NULL\");\n+\t\treturn -1;\n+\t}\n+\tif (unlikely(dev->data == NULL)) {\n+\t\tPMD_SESSION_LOG(ERR, \"dev->data is NULL\");\n+\t\treturn -1;\n+\t}\n+\thw = dev->data->dev_private;\n+\tif (unlikely(hw == NULL)) {\n+\t\tPMD_SESSION_LOG(ERR, \"hw is NULL\");\n+\t\treturn -1;\n+\t}\n+\tif (unlikely(hw->cvq == NULL)) {\n+\t\tPMD_SESSION_LOG(ERR, \"vq is NULL\");\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int virtio_crypto_check_sym_clear_session_paras(\n+\t\tstruct rte_cryptodev *dev,\n+\t\tstruct rte_cryptodev_sym_session *sess)\n+{\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (sess == NULL) {\n+\t\tPMD_SESSION_LOG(ERR, \"sym_session is NULL\");\n+\t\treturn -1;\n+\t}\n+\n+\treturn virtio_crypto_check_sym_session_paras(dev);\n+}\n+\n+#define NUM_ENTRY_SYM_CLEAR_SESSION 2\n+\n+static void  virtio_crypto_sym_clear_session(\n+\t\tstruct rte_cryptodev *dev,\n+\t\tstruct rte_cryptodev_sym_session *sess)\n+{\n+\tstruct virtio_crypto_hw *hw;\n+\tstruct virtqueue *vq;\n+\tstruct virtio_crypto_session *session;\n+\tstruct virtio_crypto_op_ctrl_req *ctrl;\n+\tstruct vring_desc *desc;\n+\tuint8_t *status;\n+\tuint8_t needed = 1;\n+\tuint32_t head;\n+\tuint8_t *malloc_virt_addr;\n+\tuint64_t malloc_phys_addr;\n+\tuint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);\n+\tuint32_t len_op_ctrl_req = sizeof(struct 
virtio_crypto_op_ctrl_req);\n+\tuint32_t desc_offset = len_op_ctrl_req + len_inhdr;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)\n+\t\treturn;\n+\n+\thw = dev->data->dev_private;\n+\tvq = hw->cvq;\n+\tsession = (struct virtio_crypto_session *)get_session_private_data(\n+\t\tsess, cryptodev_virtio_driver_id);\n+\tif (session == NULL) {\n+\t\tPMD_SESSION_LOG(ERR, \"Invalid session parameter\");\n+\t\treturn;\n+\t}\n+\n+\tPMD_SESSION_LOG(INFO, \"vq->vq_desc_head_idx = %d, \"\n+\t\t\t\"vq = %p\", vq->vq_desc_head_idx, vq);\n+\n+\tif (vq->vq_free_cnt < needed) {\n+\t\tPMD_SESSION_LOG(ERR,\n+\t\t\t\t\"vq->vq_free_cnt = %d is less than %d, \"\n+\t\t\t\t\"not enough\", vq->vq_free_cnt, needed);\n+\t\treturn;\n+\t}\n+\n+\t/*\n+\t * malloc memory to store information of ctrl request op,\n+\t * returned status and desc vring\n+\t */\n+\tmalloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr\n+\t\t+ NUM_ENTRY_SYM_CLEAR_SESSION\n+\t\t* sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);\n+\tif (malloc_virt_addr == NULL) {\n+\t\tPMD_SESSION_LOG(ERR, \"not enough heap room\");\n+\t\treturn;\n+\t}\n+\tmalloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);\n+\n+\t/* assign ctrl request op part */\n+\tctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;\n+\tctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;\n+\t/* default data virtqueue is 0 */\n+\tctrl->header.queue_id = 0;\n+\tctrl->u.destroy_session.session_id = session->session_id;\n+\n+\t/* status part */\n+\tstatus = &(((struct virtio_crypto_inhdr *)\n+\t\t((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);\n+\t*status = VIRTIO_CRYPTO_ERR;\n+\n+\t/* indirect desc vring part */\n+\tdesc = (struct vring_desc *)((uint8_t *)malloc_virt_addr\n+\t\t+ desc_offset);\n+\n+\t/* ctrl request part */\n+\tdesc[0].addr = malloc_phys_addr;\n+\tdesc[0].len = len_op_ctrl_req;\n+\tdesc[0].flags = VRING_DESC_F_NEXT;\n+\tdesc[0].next = 1;\n+\n+\t/* status part */\n+\tdesc[1].addr = malloc_phys_addr + len_op_ctrl_req;\n+\tdesc[1].len = len_inhdr;\n+\tdesc[1].flags = VRING_DESC_F_WRITE;\n+\n+\t/* use only a single desc entry */\n+\thead = vq->vq_desc_head_idx;\n+\tvq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;\n+\tvq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;\n+\tvq->vq_ring.desc[head].len\n+\t\t= NUM_ENTRY_SYM_CLEAR_SESSION\n+\t\t* sizeof(struct vring_desc);\n+\n+\tvq->vq_free_cnt -= needed;\n+\n+\tvq->vq_desc_head_idx = vq->vq_ring.desc[head].next;\n+\n+\tvq_update_avail_ring(vq, head);\n+\tvq_update_avail_idx(vq);\n+\n+\tPMD_INIT_LOG(DEBUG, \"vq->vq_queue_index = %d\", vq->vq_queue_index);\n+\n+\tvirtqueue_notify(vq);\n+\n+\trte_rmb();\n+\twhile (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {\n+\t\trte_rmb();\n+\t\tusleep(100);\n+\t}\n+\n+\twhile (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {\n+\t\tuint32_t idx, desc_idx, used_idx;\n+\t\tstruct vring_used_elem *uep;\n+\n+\t\tused_idx = (uint32_t)(vq->vq_used_cons_idx\n+\t\t\t\t& (vq->vq_nentries - 1));\n+\t\tuep = &vq->vq_ring.used->ring[used_idx];\n+\t\tidx = (uint32_t) uep->id;\n+\t\tdesc_idx = idx;\n+\t\twhile (vq->vq_ring.desc[desc_idx].flags\n+\t\t\t\t& VRING_DESC_F_NEXT) {\n+\t\t\tdesc_idx = vq->vq_ring.desc[desc_idx].next;\n+\t\t\tvq->vq_free_cnt++;\n+\t\t}\n+\n+\t\tvq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;\n+\t\tvq->vq_desc_head_idx = idx;\n+\t\tvq->vq_used_cons_idx++;\n+\t\tvq->vq_free_cnt++;\n+\t}\n+\n+\tif (*status != VIRTIO_CRYPTO_OK) {\n+\t\tPMD_SESSION_LOG(ERR, \"Close session 
failed \"\n+\t\t\t\t\"status=%\"PRIu32\", session_id=%\"PRIu64\"\",\n+\t\t\t\t*status, session->session_id);\n+\t\trte_free(malloc_virt_addr);\n+\t\treturn;\n+\t}\n+\n+\tPMD_INIT_LOG(DEBUG, \"vq->vq_free_cnt=%d\\nvq->vq_desc_head_idx=%d\",\n+\t\t\tvq->vq_free_cnt, vq->vq_desc_head_idx);\n+\n+\tPMD_SESSION_LOG(INFO, \"Close session %\"PRIu64\" successfully \",\n+\t\t\tsession->session_id);\n+\n+\tmemset(sess, 0, sizeof(struct virtio_crypto_session));\n+\trte_free(malloc_virt_addr);\n+}\n+\n+static struct rte_crypto_cipher_xform *\n+virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)\n+{\n+\tdo {\n+\t\tif (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)\n+\t\t\treturn &xform->cipher;\n+\n+\t\txform = xform->next;\n+\t} while (xform);\n+\n+\treturn NULL;\n+}\n+\n+static struct rte_crypto_auth_xform *\n+virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)\n+{\n+\tdo {\n+\t\tif (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)\n+\t\t\treturn &xform->auth;\n+\n+\t\txform = xform->next;\n+\t} while (xform);\n+\n+\treturn NULL;\n+}\n+\n+/** Get xform chain order */\n+static int\n+virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)\n+{\n+\tif (xform == NULL)\n+\t\treturn -1;\n+\n+\t/* Cipher Only */\n+\tif (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&\n+\t\t\txform->next == NULL)\n+\t\treturn VIRTIO_CRYPTO_CMD_CIPHER;\n+\n+\t/* Authentication Only */\n+\tif (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&\n+\t\t\txform->next == NULL)\n+\t\treturn VIRTIO_CRYPTO_CMD_AUTH;\n+\n+\t/* Authenticate then Cipher */\n+\tif (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&\n+\t\t\txform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)\n+\t\treturn VIRTIO_CRYPTO_CMD_HASH_CIPHER;\n+\n+\t/* Cipher then Authenticate */\n+\tif (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&\n+\t\t\txform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)\n+\t\treturn VIRTIO_CRYPTO_CMD_CIPHER_HASH;\n+\n+\treturn -1;\n+}\n+\n+static int virtio_crypto_sym_pad_cipher_param(\n+\t\tstruct virtio_crypto_cipher_session_para *para,\n+\t\tstruct rte_crypto_cipher_xform *cipher_xform)\n+{\n+\tswitch (cipher_xform->algo) {\n+\tcase RTE_CRYPTO_CIPHER_NULL:\n+\t\tpara->algo = VIRTIO_CRYPTO_NO_CIPHER;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_CIPHER_3DES_CBC:\n+\t\tpara->algo = VIRTIO_CRYPTO_CIPHER_3DES_CBC;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_CIPHER_3DES_CTR:\n+\t\tpara->algo = VIRTIO_CRYPTO_CIPHER_3DES_CTR;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_CIPHER_3DES_ECB:\n+\t\tpara->algo = VIRTIO_CRYPTO_CIPHER_3DES_ECB;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_CIPHER_AES_CBC:\n+\t\tpara->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;\n+\t\tbreak;\n+\tcase  RTE_CRYPTO_CIPHER_AES_CTR:\n+\t\tpara->algo = VIRTIO_CRYPTO_CIPHER_AES_CTR;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_CIPHER_AES_ECB:\n+\t\tpara->algo = VIRTIO_CRYPTO_CIPHER_AES_ECB;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_CIPHER_AES_F8:\n+\t\tpara->algo = VIRTIO_CRYPTO_CIPHER_AES_F8;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_CIPHER_AES_XTS:\n+\t\tpara->algo = VIRTIO_CRYPTO_CIPHER_AES_XTS;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_CIPHER_ARC4:\n+\t\tpara->algo = VIRTIO_CRYPTO_CIPHER_ARC4;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_CIPHER_KASUMI_F8:\n+\t\tpara->algo = VIRTIO_CRYPTO_CIPHER_KASUMI_F8;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_CIPHER_SNOW3G_UEA2:\n+\t\tpara->algo = VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_CIPHER_ZUC_EEA3:\n+\t\tpara->algo = VIRTIO_CRYPTO_CIPHER_ZUC_EEA3;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_CIPHER_DES_CBC:\n+\t\tpara->algo = VIRTIO_CRYPTO_CIPHER_DES_CBC;\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_SESSION_LOG(ERR, \"Crypto: Unsupported 
Cipher alg %u\",\n+\t\t\t\tcipher_xform->algo);\n+\t\treturn -1;\n+\t}\n+\n+\tpara->keylen = cipher_xform->key.length;\n+\tswitch (cipher_xform->op) {\n+\tcase RTE_CRYPTO_CIPHER_OP_ENCRYPT:\n+\t\tpara->op = VIRTIO_CRYPTO_OP_ENCRYPT;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_CIPHER_OP_DECRYPT:\n+\t\tpara->op = VIRTIO_CRYPTO_OP_DECRYPT;\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_SESSION_LOG(ERR, \"Unsupported cipher operation parameter\");\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int virtio_crypto_sym_pad_auth_param(\n+\t\tstruct virtio_crypto_op_ctrl_req *ctrl,\n+\t\tstruct rte_crypto_auth_xform *auth_xform)\n+{\n+\tuint32_t *algo;\n+\tstruct virtio_crypto_alg_chain_session_para *para =\n+\t\t&(ctrl->u.sym_create_session.u.chain.para);\n+\n+\tswitch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {\n+\tcase VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:\n+\t\talgo = &(para->u.hash_param.algo);\n+\t\tbreak;\n+\tcase VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:\n+\t\talgo = &(para->u.mac_param.algo);\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_SESSION_LOG(ERR, \"Unsupported hash mode %u specified\",\n+\t\t\tctrl->u.sym_create_session.u.chain.para.hash_mode);\n+\t\treturn -1;\n+\t}\n+\n+\tswitch (auth_xform->algo) {\n+\tcase RTE_CRYPTO_AUTH_NULL:\n+\t\t*algo = VIRTIO_CRYPTO_NO_MAC;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_AES_CBC_MAC:\n+\t\t*algo = VIRTIO_CRYPTO_MAC_CBCMAC_AES;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_AES_CMAC:\n+\t\t*algo = VIRTIO_CRYPTO_MAC_CMAC_AES;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_AES_GMAC:\n+\t\t*algo = VIRTIO_CRYPTO_MAC_GMAC_AES;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_AES_XCBC_MAC:\n+\t\t*algo = VIRTIO_CRYPTO_MAC_XCBC_AES;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_KASUMI_F9:\n+\t\t*algo = VIRTIO_CRYPTO_MAC_KASUMI_F9;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_MD5:\n+\t\t*algo = VIRTIO_CRYPTO_HASH_MD5;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_MD5_HMAC:\n+\t\t*algo = VIRTIO_CRYPTO_MAC_HMAC_MD5;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_SHA1:\n+\t\t*algo = VIRTIO_CRYPTO_HASH_SHA1;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_SHA1_HMAC:\n+\t\t*algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_SHA224:\n+\t\t*algo = VIRTIO_CRYPTO_HASH_SHA_224;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_SHA224_HMAC:\n+\t\t*algo = VIRTIO_CRYPTO_MAC_HMAC_SHA_224;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_SHA256:\n+\t\t*algo = VIRTIO_CRYPTO_HASH_SHA_256;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_SHA256_HMAC:\n+\t\t*algo = VIRTIO_CRYPTO_MAC_HMAC_SHA_256;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_SHA384:\n+\t\t*algo = VIRTIO_CRYPTO_HASH_SHA_384;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_SHA384_HMAC:\n+\t\t*algo = VIRTIO_CRYPTO_MAC_HMAC_SHA_384;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_SHA512:\n+\t\t*algo = VIRTIO_CRYPTO_HASH_SHA_512;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_SHA512_HMAC:\n+\t\t*algo = VIRTIO_CRYPTO_MAC_HMAC_SHA_512;\n+\t\tbreak;\n+\tcase RTE_CRYPTO_AUTH_SNOW3G_UIA2:\n+\t\t*algo = VIRTIO_CRYPTO_MAC_SNOW3G_UIA2;\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_SESSION_LOG(ERR,\n+\t\t\t\"Crypto: Undefined Hash algo %u specified\",\n+\t\t\tauth_xform->algo);\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int virtio_crypto_sym_pad_op_ctrl_req(\n+\t\tstruct virtio_crypto_op_ctrl_req *ctrl,\n+\t\tstruct rte_crypto_sym_xform *xform, bool is_chainned,\n+\t\tuint8_t **cipher_key_data, uint8_t **auth_key_data,\n+\t\tstruct virtio_crypto_session *session)\n+{\n+\tint ret;\n+\tstruct rte_crypto_auth_xform *auth_xform = NULL;\n+\tstruct rte_crypto_cipher_xform *cipher_xform = NULL;\n+\n+\t/* Get cipher xform from crypto xform chain */\n+\tcipher_xform = 
virtio_crypto_get_cipher_xform(xform);\n+\tif (cipher_xform) {\n+\t\tif (is_chainned)\n+\t\t\tret = virtio_crypto_sym_pad_cipher_param(\n+\t\t\t\t&ctrl->u.sym_create_session.u.chain.para\n+\t\t\t\t\t\t.cipher_param, cipher_xform);\n+\t\telse\n+\t\t\tret = virtio_crypto_sym_pad_cipher_param(\n+\t\t\t\t&ctrl->u.sym_create_session.u.cipher.para,\n+\t\t\t\tcipher_xform);\n+\n+\t\tif (ret < 0) {\n+\t\t\tPMD_SESSION_LOG(ERR,\n+\t\t\t\t\"pad cipher parameter failed\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\t*cipher_key_data = cipher_xform->key.data;\n+\n+\t\tsession->iv.offset = cipher_xform->iv.offset;\n+\t\tsession->iv.length = cipher_xform->iv.length;\n+\t}\n+\n+\t/* Get auth xform from crypto xform chain */\n+\tauth_xform = virtio_crypto_get_auth_xform(xform);\n+\tif (auth_xform) {\n+\t\t/* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */\n+\t\tstruct virtio_crypto_alg_chain_session_para *para =\n+\t\t\t&(ctrl->u.sym_create_session.u.chain.para);\n+\t\tif (auth_xform->key.length) {\n+\t\t\tpara->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;\n+\t\t\tpara->u.mac_param.auth_key_len =\n+\t\t\t\t(uint32_t)auth_xform->key.length;\n+\t\t\tpara->u.mac_param.hash_result_len =\n+\t\t\t\tauth_xform->digest_length;\n+\n+\t\t\t*auth_key_data = auth_xform->key.data;\n+\t\t} else {\n+\t\t\tpara->hash_mode\t= VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;\n+\t\t\tpara->u.hash_param.hash_result_len =\n+\t\t\t\tauth_xform->digest_length;\n+\t\t}\n+\n+\t\tret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);\n+\t\tif (ret < 0) {\n+\t\t\tPMD_SESSION_LOG(ERR, \"pad auth parameter failed\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int virtio_crypto_check_sym_configure_session_paras(\n+\t\tstruct rte_cryptodev *dev,\n+\t\tstruct rte_crypto_sym_xform *xform,\n+\t\tstruct rte_cryptodev_sym_session *sym_sess,\n+\t\tstruct rte_mempool *mempool)\n+{\n+\tif (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||\n+\t\tunlikely(mempool == NULL)) {\n+\t\tPMD_SESSION_LOG(ERR, \"NULL pointer\");\n+\t\treturn -1;\n+\t}\n+\n+\tif (virtio_crypto_check_sym_session_paras(dev) < 0)\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+static int virtio_crypto_sym_configure_session(\n+\t\tstruct rte_cryptodev *dev,\n+\t\tstruct rte_crypto_sym_xform *xform,\n+\t\tstruct rte_cryptodev_sym_session *sess,\n+\t\tstruct rte_mempool *mempool)\n+{\n+\tint ret;\n+\tstruct virtio_crypto_session crypto_sess;\n+\tvoid *session_private = &crypto_sess;\n+\tstruct virtio_crypto_session *session;\n+\tstruct virtio_crypto_op_ctrl_req *ctrl_req;\n+\tenum virtio_crypto_cmd_id cmd_id;\n+\tuint8_t *cipher_key_data = NULL;\n+\tuint8_t *auth_key_data = NULL;\n+\tstruct virtio_crypto_hw *hw;\n+\tstruct virtqueue *control_vq;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tret = virtio_crypto_check_sym_configure_session_paras(dev, xform,\n+\t\t\tsess, mempool);\n+\tif (ret < 0) {\n+\t\tPMD_SESSION_LOG(ERR, \"Invalid parameters\");\n+\t\treturn ret;\n+\t}\n+\n+\tif (rte_mempool_get(mempool, &session_private)) {\n+\t\tPMD_SESSION_LOG(ERR,\n+\t\t\t\"Couldn't get object from session mempool\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tsession = (struct virtio_crypto_session *)session_private;\n+\tmemset(session, 0, sizeof(struct virtio_crypto_session));\n+\tctrl_req = &session->ctrl;\n+\tctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;\n+\t/* FIXME: support multiqueue */\n+\tctrl_req->header.queue_id = 0;\n+\n+\thw = dev->data->dev_private;\n+\tcontrol_vq = hw->cvq;\n+\n+\tcmd_id = virtio_crypto_get_chain_order(xform);\n+\tif (cmd_id == 
VIRTIO_CRYPTO_CMD_CIPHER_HASH)\n+\t\tctrl_req->u.sym_create_session.u.chain.para.alg_chain_order\n+\t\t\t= VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;\n+\tif (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)\n+\t\tctrl_req->u.sym_create_session.u.chain.para.alg_chain_order\n+\t\t\t= VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;\n+\n+\tswitch (cmd_id) {\n+\tcase VIRTIO_CRYPTO_CMD_CIPHER_HASH:\n+\tcase VIRTIO_CRYPTO_CMD_HASH_CIPHER:\n+\t\tctrl_req->u.sym_create_session.op_type\n+\t\t\t= VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;\n+\n+\t\tret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,\n+\t\t\txform, true, &cipher_key_data, &auth_key_data, session);\n+\t\tif (ret < 0) {\n+\t\t\tPMD_SESSION_LOG(ERR,\n+\t\t\t\t\"padding sym op ctrl req failed\");\n+\t\t\tgoto error_out;\n+\t\t}\n+\t\tret = virtio_crypto_send_command(control_vq, ctrl_req,\n+\t\t\tcipher_key_data, auth_key_data, session);\n+\t\tif (ret < 0) {\n+\t\t\tPMD_SESSION_LOG(ERR,\n+\t\t\t\t\"create session failed: %d\", ret);\n+\t\t\tgoto error_out;\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTIO_CRYPTO_CMD_CIPHER:\n+\t\tctrl_req->u.sym_create_session.op_type\n+\t\t\t= VIRTIO_CRYPTO_SYM_OP_CIPHER;\n+\t\tret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,\n+\t\t\tfalse, &cipher_key_data, &auth_key_data, session);\n+\t\tif (ret < 0) {\n+\t\t\tPMD_SESSION_LOG(ERR,\n+\t\t\t\t\"padding sym op ctrl req failed\");\n+\t\t\tgoto error_out;\n+\t\t}\n+\t\tret = virtio_crypto_send_command(control_vq, ctrl_req,\n+\t\t\tcipher_key_data, NULL, session);\n+\t\tif (ret < 0) {\n+\t\t\tPMD_SESSION_LOG(ERR,\n+\t\t\t\t\"create session failed: %d\", ret);\n+\t\t\tgoto error_out;\n+\t\t}\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_SESSION_LOG(ERR,\n+\t\t\t\"Unsupported operation chain order parameter\");\n+\t\tgoto error_out;\n+\t}\n+\n+\tset_session_private_data(sess, dev->driver_id,\n+\t\tsession_private);\n+\n+\treturn 0;\n+\n+error_out:\n+\treturn -1;\n+}\n+\n+static void\n+virtio_crypto_dev_info_get(struct rte_cryptodev *dev,\n+\t\tstruct rte_cryptodev_info *info)\n+{\n+\tstruct virtio_crypto_hw *hw = dev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (info != NULL) {\n+\t\tinfo->driver_id = cryptodev_virtio_driver_id;\n+\t\tinfo->pci_dev = RTE_DEV_TO_PCI(dev->device);\n+\t\tinfo->feature_flags = dev->feature_flags;\n+\t\tinfo->max_nb_queue_pairs = hw->max_dataqueues;\n+\t\tinfo->sym.max_nb_sessions =\n+\t\t\tRTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS;\n+\t\tinfo->capabilities = hw->virtio_dev_capabilities;\n+\t}\n+}\n+\n+static int crypto_virtio_pci_probe(\n+\tstruct rte_pci_driver *pci_drv __rte_unused,\n+\tstruct rte_pci_device *pci_dev)\n+{\n+\tstruct rte_cryptodev_pmd_init_params init_params = {\n+\t\t.name = \"\",\n+\t\t.socket_id = rte_socket_id(),\n+\t\t.private_data_size = sizeof(struct virtio_crypto_hw),\n+\t\t.max_nb_sessions = RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS\n+\t};\n+\tchar name[RTE_CRYPTODEV_NAME_MAX_LEN];\n+\n+\tPMD_DRV_LOG(DEBUG, \"Found Crypto device at %02x:%02x.%x\",\n+\t\t\tpci_dev->addr.bus,\n+\t\t\tpci_dev->addr.devid,\n+\t\t\tpci_dev->addr.function);\n+\n+\trte_pci_device_name(&pci_dev->addr, name, sizeof(name));\n+\n+\treturn crypto_virtio_create(name, pci_dev, &init_params);\n+}\n+\n+static int crypto_virtio_pci_remove(struct rte_pci_device *pci_dev)\n+{\n+\tstruct rte_cryptodev *cryptodev;\n+\tchar cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];\n+\n+\tif (pci_dev == NULL)\n+\t\treturn -EINVAL;\n+\n+\trte_pci_device_name(&pci_dev->addr, cryptodev_name,\n+\t\t\tsizeof(cryptodev_name));\n+\n+\tcryptodev = 
rte_cryptodev_pmd_get_named_dev(cryptodev_name);\n+\tif (cryptodev == NULL)\n+\t\treturn -ENODEV;\n+\n+\treturn virtio_crypto_dev_uninit(cryptodev);\n+}\n+\n+static struct rte_pci_driver rte_virtio_crypto_driver = {\n+\t.id_table = pci_id_virtio_crypto_map,\n+\t.drv_flags = 0,\n+\t.probe = crypto_virtio_pci_probe,\n+\t.remove = crypto_virtio_pci_remove\n+};\n+\n+static struct cryptodev_driver virtio_crypto_drv;\n+\n+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);\n+RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv, rte_virtio_crypto_driver,\n+\t\tcryptodev_virtio_driver_id);\ndiff --git a/drivers/crypto/virtio/virtio_cryptodev.h b/drivers/crypto/virtio/virtio_cryptodev.h\nnew file mode 100644\nindex 0000000..875400c\n--- /dev/null\n+++ b/drivers/crypto/virtio/virtio_cryptodev.h\n@@ -0,0 +1,66 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.\n+ */\n+\n+#ifndef _VIRTIO_CRYPTODEV_H_\n+#define _VIRTIO_CRYPTODEV_H_\n+\n+#include <linux/virtio_crypto.h>\n+\n+#include \"virtio_pci.h\"\n+#include \"virtio_ring.h\"\n+\n+#ifndef PAGE_SIZE\n+#define PAGE_SIZE 4096\n+#endif\n+\n+#define CRYPTODEV_NAME_VIRTIO_PMD crypto_virtio\n+\n+#define NUM_ENTRY_VIRTIO_CRYPTO_OP 7\n+\n+/* Features desired/implemented by this driver. */\n+#define VIRTIO_CRYPTO_PMD_GUEST_FEATURES (1ULL << VIRTIO_F_VERSION_1)\n+\n+extern uint8_t cryptodev_virtio_driver_id;\n+\n+enum virtio_crypto_cmd_id {\n+\tVIRTIO_CRYPTO_CMD_CIPHER = 0,\n+\tVIRTIO_CRYPTO_CMD_AUTH = 1,\n+\tVIRTIO_CRYPTO_CMD_CIPHER_HASH = 2,\n+\tVIRTIO_CRYPTO_CMD_HASH_CIPHER = 3\n+};\n+\n+struct virtio_crypto_op_cookie {\n+\tstruct virtio_crypto_op_data_req data_req;\n+\tstruct virtio_crypto_inhdr inhdr;\n+\tstruct vring_desc desc[NUM_ENTRY_VIRTIO_CRYPTO_OP];\n+};\n+\n+/*\n+ * Control queue function prototype\n+ */\n+void virtio_crypto_ctrlq_start(struct rte_cryptodev *dev);\n+\n+/*\n+ * Data queue function prototype\n+ */\n+void virtio_crypto_dataq_start(struct rte_cryptodev *dev);\n+\n+int virtio_crypto_queue_setup(struct rte_cryptodev *dev,\n+\t\tint queue_type,\n+\t\tuint16_t vtpci_queue_idx,\n+\t\tuint16_t nb_desc,\n+\t\tint socket_id,\n+\t\tstruct virtqueue **pvq);\n+\n+void virtio_crypto_queue_release(struct virtqueue *vq);\n+\n+uint16_t virtio_crypto_pkt_tx_burst(void *tx_queue,\n+\t\tstruct rte_crypto_op **tx_pkts,\n+\t\tuint16_t nb_pkts);\n+\n+uint16_t virtio_crypto_pkt_rx_burst(void *tx_queue,\n+\t\tstruct rte_crypto_op **rx_pkts,\n+\t\tuint16_t nb_pkts);\n+\n+#endif /* _VIRTIO_CRYPTODEV_H_ */\ndiff --git a/drivers/crypto/virtio/virtio_rxtx.c b/drivers/crypto/virtio/virtio_rxtx.c\nnew file mode 100644\nindex 0000000..0d624ee\n--- /dev/null\n+++ b/drivers/crypto/virtio/virtio_rxtx.c\n@@ -0,0 +1,540 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.\n+ */\n+\n+#include <stdint.h>\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <string.h>\n+#include <errno.h>\n+\n+#include <rte_cycles.h>\n+#include <rte_memory.h>\n+#include <rte_memcpy.h>\n+#include <rte_memzone.h>\n+#include <rte_branch_prediction.h>\n+#include <rte_mempool.h>\n+#include <rte_malloc.h>\n+#include <rte_mbuf.h>\n+#include <rte_cryptodev.h>\n+#include <rte_prefetch.h>\n+#include <rte_string_fns.h>\n+#include <rte_errno.h>\n+#include <rte_byteorder.h>\n+#include <rte_cryptodev_pmd.h>\n+\n+#include \"virtqueue.h\"\n+#include \"virtio_cryptodev.h\"\n+#include \"virtio_crypto_algs.h\"\n+\n+#ifdef RTE_LIBRTE_PMD_VIRTIO_CRYPTO_DEBUG_DUMP\n+#define VIRTIO_DUMP_PACKET(m, len) \\\n+\trte_pktmbuf_dump(stdout, m, len)\n+#else\n+#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)\n+#endif\n+\n+static void\n+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)\n+{\n+\tstruct vring_desc *dp, *dp_tail;\n+\tstruct vq_desc_extra *dxp;\n+\tuint16_t desc_idx_last = desc_idx;\n+\n+\tdp = &vq->vq_ring.desc[desc_idx];\n+\tdxp = &vq->vq_descx[desc_idx];\n+\tvq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);\n+\tif ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {\n+\t\twhile (dp->flags & VRING_DESC_F_NEXT) {\n+\t\t\tdesc_idx_last = dp->next;\n+\t\t\tdp = &vq->vq_ring.desc[dp->next];\n+\t\t}\n+\t}\n+\tdxp->ndescs = 0;\n+\n+\t/*\n+\t * We must append the existing free chain, if any, to the end of\n+\t * the newly freed chain. If the virtqueue was completely used, then\n+\t * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).\n+\t */\n+\tif (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {\n+\t\tvq->vq_desc_head_idx = desc_idx;\n+\t} else {\n+\t\tdp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];\n+\t\tdp_tail->next = desc_idx;\n+\t}\n+\n+\tvq->vq_desc_tail_idx = desc_idx_last;\n+\tdp->next = VQ_RING_DESC_CHAIN_END;\n+}\n+\n+static uint16_t\n+virtqueue_dequeue_burst_rx(struct virtqueue *vq,\n+\t\tstruct rte_crypto_op **rx_pkts, uint16_t num)\n+{\n+\tstruct vring_used_elem *uep;\n+\tstruct rte_crypto_op *cop;\n+\tuint16_t used_idx, desc_idx;\n+\tuint16_t i;\n+\tstruct virtio_crypto_inhdr *inhdr;\n+\tstruct virtio_crypto_op_cookie *op_cookie;\n+\n+\t/* Caller does the check */\n+\tfor (i = 0; i < num; i++) {\n+\t\tused_idx = (uint16_t)(vq->vq_used_cons_idx\n+\t\t\t\t& (vq->vq_nentries - 1));\n+\t\tuep = &vq->vq_ring.used->ring[used_idx];\n+\t\tdesc_idx = (uint16_t)uep->id;\n+\t\tcop = (struct rte_crypto_op *)\n+\t\t\t\tvq->vq_descx[desc_idx].crypto_op;\n+\t\tif (unlikely(cop == NULL)) {\n+\t\t\tPMD_RX_LOG(ERR, \"vring descriptor with no \"\n+\t\t\t\t\t\"crypto op cookie at %u\",\n+\t\t\t\t\tvq->vq_used_cons_idx);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\top_cookie = (struct virtio_crypto_op_cookie *)\n+\t\t\t\t\t\tvq->vq_descx[desc_idx].cookie;\n+\t\tinhdr = &(op_cookie->inhdr);\n+\t\tswitch (inhdr->status) {\n+\t\tcase VIRTIO_CRYPTO_OK:\n+\t\t\tcop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;\n+\t\t\tbreak;\n+\t\tcase VIRTIO_CRYPTO_ERR:\n+\t\t\tcop->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\t\tvq->packets_received_failed++;\n+\t\t\tbreak;\n+\t\tcase VIRTIO_CRYPTO_BADMSG:\n+\t\t\tcop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\t\tvq->packets_received_failed++;\n+\t\t\tbreak;\n+\t\tcase VIRTIO_CRYPTO_NOTSUPP:\n+\t\t\tcop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\t\tvq->packets_received_failed++;\n+\t\t\tbreak;\n+\t\tcase VIRTIO_CRYPTO_INVSESS:\n+\t\t\tcop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;\n+\t\t\tvq->packets_received_failed++;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tvq->packets_received_total++;\n+\n+\t\trx_pkts[i] = cop;\n+\t\trte_mempool_put(vq->mpool, op_cookie);\n+\n+\t\tvq->vq_used_cons_idx++;\n+\t\tvq_ring_free_chain(vq, desc_idx);\n+\t\tvq->vq_descx[desc_idx].crypto_op = NULL;\n+\t}\n+\n+\treturn i;\n+}\n+\n+static int\n+virtqueue_crypto_sym_pkt_header_arrange(\n+\t\tstruct rte_crypto_op *cop,\n+\t\tstruct virtio_crypto_op_data_req *data,\n+\t\tstruct virtio_crypto_session *session)\n+{\n+\tstruct rte_crypto_sym_op *sym_op = cop->sym;\n+\tstruct virtio_crypto_op_data_req *req_data = data;\n+\tstruct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;\n+\tstruct virtio_crypto_sym_create_session_req *sym_sess_req =\n+\t\t&ctrl->u.sym_create_session;\n+\tstruct virtio_crypto_alg_chain_session_para *chain_para =\n+\t\t&sym_sess_req->u.chain.para;\n+\tstruct virtio_crypto_cipher_session_para *cipher_para;\n+\n+\treq_data->header.session_id = session->session_id;\n+\n+\tswitch (sym_sess_req->op_type) {\n+\tcase VIRTIO_CRYPTO_SYM_OP_CIPHER:\n+\t\treq_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;\n+\n+\t\tcipher_para = &sym_sess_req->u.cipher.para;\n+\t\tif (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)\n+\t\t\treq_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;\n+\t\telse\n+\t\t\treq_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;\n+\n+\t\treq_data->u.sym_req.u.cipher.para.iv_len\n+\t\t\t= session->iv.length;\n+\n+\t\treq_data->u.sym_req.u.cipher.para.src_data_len =\n+\t\t\t(sym_op->cipher.data.length +\n+\t\t\t\tsym_op->cipher.data.offset);\n+\t\treq_data->u.sym_req.u.cipher.para.dst_data_len =\n+\t\t\treq_data->u.sym_req.u.cipher.para.src_data_len;\n+\t\tbreak;\n+\tcase VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:\n+\t\treq_data->u.sym_req.op_type =\n+\t\t\tVIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;\n+\n+\t\tcipher_para = &chain_para->cipher_param;\n+\t\tif (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)\n+\t\t\treq_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;\n+\t\telse\n+\t\t\treq_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;\n+\n+\t\treq_data->u.sym_req.u.chain.para.iv_len = session->iv.length;\n+\t\treq_data->u.sym_req.u.chain.para.aad_len = session->aad.length;\n+\n+\t\treq_data->u.sym_req.u.chain.para.src_data_len =\n+\t\t\t(sym_op->cipher.data.length +\n+\t\t\t\tsym_op->cipher.data.offset);\n+\t\treq_data->u.sym_req.u.chain.para.dst_data_len =\n+\t\t\treq_data->u.sym_req.u.chain.para.src_data_len;\n+\t\treq_data->u.sym_req.u.chain.para.cipher_start_src_offset =\n+\t\t\tsym_op->cipher.data.offset;\n+\t\treq_data->u.sym_req.u.chain.para.len_to_cipher =\n+\t\t\tsym_op->cipher.data.length;\n+\t\treq_data->u.sym_req.u.chain.para.hash_start_src_offset =\n+\t\t\tsym_op->auth.data.offset;\n+\t\treq_data->u.sym_req.u.chain.para.len_to_hash =\n+\t\t\tsym_op->auth.data.length;\n+\t\treq_data->u.sym_req.u.chain.para.aad_len =\n+\t\t\tchain_para->aad_len;\n+\n+\t\tif (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)\n+\t\t\treq_data->u.sym_req.u.chain.para.hash_result_len =\n+\t\t\t\tchain_para->u.hash_param.hash_result_len;\n+\t\tif (chain_para->hash_mode ==\n+\t\t\tVIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)\n+\t\t\treq_data->u.sym_req.u.chain.para.hash_result_len =\n+\t\t\t\tchain_para->u.mac_param.hash_result_len;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+virtqueue_crypto_sym_enqueue_xmit(\n+\t\tstruct virtqueue *txvq,\n+\t\tstruct rte_crypto_op *cop)\n+{\n+\tuint16_t idx = 0;\n+\tuint16_t num_entry;\n+\tuint16_t needed = 1;\n+\tuint16_t head_idx;\n+\tstruct vq_desc_extra *dxp;\n+\tstruct vring_desc *start_dp;\n+\tstruct vring_desc *desc;\n+\tuint64_t indirect_op_data_req_phys_addr;\n+\tuint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);\n+\tuint32_t indirect_vring_addr_offset = req_data_len +\n+\t\tsizeof(struct virtio_crypto_inhdr);\n+\tstruct rte_crypto_sym_op *sym_op = cop->sym;\n+\tstruct virtio_crypto_session *session =\n+\t\t(struct virtio_crypto_session *)get_session_private_data(\n+\t\tcop->sym->session, cryptodev_virtio_driver_id);\n+\tstruct virtio_crypto_op_data_req *op_data_req;\n+\tuint32_t hash_result_len = 0;\n+\tstruct virtio_crypto_op_cookie *crypto_op_cookie;\n+\tstruct virtio_crypto_alg_chain_session_para *para;\n+\n+\tif (unlikely(sym_op->m_src->nb_segs != 1))\n+\t\treturn -EMSGSIZE;\n+\tif (unlikely(txvq->vq_free_cnt == 0))\n+\t\treturn -ENOSPC;\n+\tif (unlikely(txvq->vq_free_cnt < needed))\n+\t\treturn -EMSGSIZE;\n+\thead_idx = txvq->vq_desc_head_idx;\n+\tif (unlikely(head_idx >= txvq->vq_nentries))\n+\t\treturn -EFAULT;\n+\tif (unlikely(session == NULL))\n+\t\treturn -EFAULT;\n+\n+\tdxp = &txvq->vq_descx[head_idx];\n+\n+\tif (rte_mempool_get(txvq->mpool, &dxp->cookie)) {\n+\t\tPMD_TX_LOG(ERR, \"cannot get cookie\");\n+\t\treturn -EFAULT;\n+\t}\n+\tcrypto_op_cookie = dxp->cookie;\n+\tindirect_op_data_req_phys_addr =\n+\t\trte_mempool_virt2iova(crypto_op_cookie);\n+\top_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;\n+\n+\tif (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req, session))\n+\t\treturn -EFAULT;\n+\n+\t/* status is initialized to VIRTIO_CRYPTO_ERR */\n+\t((struct virtio_crypto_inhdr *)\n+\t\t((uint8_t *)op_data_req + req_data_len))->status =\n+\t\tVIRTIO_CRYPTO_ERR;\n+\n+\t/* point to indirect vring entry */\n+\tdesc = (struct vring_desc *)\n+\t\t((uint8_t *)op_data_req + indirect_vring_addr_offset);\n+\tfor (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)\n+\t\tdesc[idx].next = idx + 1;\n+\tdesc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;\n+\n+\tidx = 0;\n+\n+\t/* indirect vring: first part, virtio_crypto_op_data_req */\n+\tdesc[idx].addr = indirect_op_data_req_phys_addr;\n+\tdesc[idx].len = req_data_len;\n+\tdesc[idx++].flags = VRING_DESC_F_NEXT;\n+\n+\t/* indirect vring: iv of cipher */\n+\tif (session->iv.length) {\n+\t\tdesc[idx].addr = cop->phys_addr + session->iv.offset;\n+\t\tdesc[idx].len = session->iv.length;\n+\t\tdesc[idx++].flags = VRING_DESC_F_NEXT;\n+\t}\n+\n+\t/* indirect vring: additional auth data */\n+\tif (session->aad.length) {\n+\t\tdesc[idx].addr = session->aad.phys_addr;\n+\t\tdesc[idx].len = session->aad.length;\n+\t\tdesc[idx++].flags = VRING_DESC_F_NEXT;\n+\t}\n+\n+\t/* indirect vring: src data */\n+\tdesc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);\n+\tdesc[idx].len = (sym_op->cipher.data.offset\n+\t\t+ sym_op->cipher.data.length);\n+\tdesc[idx++].flags = VRING_DESC_F_NEXT;\n+\n+\t/* indirect vring: dst data */\n+\tif (sym_op->m_dst) {\n+\t\tdesc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_dst, 0);\n+\t\tdesc[idx].len = (sym_op->cipher.data.offset\n+\t\t\t+ sym_op->cipher.data.length);\n+\t} else {\n+\t\tdesc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);\n+\t\tdesc[idx].len = (sym_op->cipher.data.offset\n+\t\t\t+ sym_op->cipher.data.length);\n+\t}\n+\tdesc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;\n+\n+\t/* indirect vring: digest result */\n+\tpara = &(session->ctrl.u.sym_create_session.u.chain.para);\n+\tif (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)\n+\t\thash_result_len = para->u.hash_param.hash_result_len;\n+\tif (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)\n+\t\thash_result_len = para->u.mac_param.hash_result_len;\n+\tif (hash_result_len > 0) {\n+\t\tdesc[idx].addr = sym_op->auth.digest.phys_addr;\n+\t\tdesc[idx].len = hash_result_len;\n+\t\tdesc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;\n+\t}\n+\n+\t/* indirect vring: last part, status returned */\n+\tdesc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;\n+\tdesc[idx].len = sizeof(struct virtio_crypto_inhdr);\n+\tdesc[idx++].flags = VRING_DESC_F_WRITE;\n+\n+\tnum_entry = idx;\n+\n+\t/* save the info to use when receiving packets */\n+\tdxp->crypto_op = (void *)cop;\n+\tdxp->ndescs = needed;\n+\n+\t/* use a single buffer */\n+\tstart_dp = txvq->vq_ring.desc;\n+\tstart_dp[head_idx].addr = indirect_op_data_req_phys_addr +\n+\t\tindirect_vring_addr_offset;\n+\tstart_dp[head_idx].len = num_entry * sizeof(struct vring_desc);\n+\tstart_dp[head_idx].flags = VRING_DESC_F_INDIRECT;\n+\n+\tidx = start_dp[head_idx].next;\n+\ttxvq->vq_desc_head_idx = idx;\n+\tif (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)\n+\t\ttxvq->vq_desc_tail_idx = idx;\n+\ttxvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);\n+\tvq_update_avail_ring(txvq, head_idx);\n+\n+\treturn 0;\n+}\n+\n+static int\n+virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,\n+\t\tstruct rte_crypto_op *cop)\n+{\n+\tint ret;\n+\n+\tswitch (cop->type) {\n+\tcase RTE_CRYPTO_OP_TYPE_SYMMETRIC:\n+\t\tret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_TX_LOG(ERR, \"invalid crypto op type %u\", cop->type);\n+\t\tret = -EFAULT;\n+\t\tbreak;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int\n+virtio_crypto_vring_start(struct virtqueue *vq)\n+{\n+\tstruct virtio_crypto_hw *hw = vq->hw;\n+\tint i, size = vq->vq_nentries;\n+\tstruct vring *vr = &vq->vq_ring;\n+\tuint8_t *ring_mem = vq->vq_ring_virt_mem;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tvring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);\n+\tvq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);\n+\tvq->vq_free_cnt = vq->vq_nentries;\n+\n+\t/* Chain all the descriptors in the ring with an END */\n+\tfor (i = 0; i < size - 1; i++)\n+\t\tvr->desc[i].next = (uint16_t)(i + 1);\n+\tvr->desc[i].next = VQ_RING_DESC_CHAIN_END;\n+\n+\t/*\n+\t * Disable device (host) from interrupting guest\n+\t */\n+\tvirtqueue_disable_intr(vq);\n+\n+\t/*\n+\t * Set guest physical address of the virtqueue\n+\t * in VIRTIO_PCI_QUEUE_PFN config register of device\n+\t * to share with the backend\n+\t */\n+\tif (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {\n+\t\tPMD_INIT_LOG(ERR, \"setup_queue failed\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+void\n+virtio_crypto_ctrlq_start(struct rte_cryptodev *dev)\n+{\n+\tstruct virtio_crypto_hw *hw = dev->data->dev_private;\n+\n+\tif (hw->cvq) {\n+\t\tvirtio_crypto_vring_start(hw->cvq);\n+\t\tVIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);\n+\t}\n+}\n+\n+void\n+virtio_crypto_dataq_start(struct rte_cryptodev *dev)\n+{\n+\t/*\n+\t * Start data vrings\n+\t * -\tSetup vring structure for data queues\n+\t */\n+\tuint16_t i;\n+\tstruct virtio_crypto_hw *hw = dev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* Start data vring. */\n+\tfor (i = 0; i < hw->max_dataqueues; i++) {\n+\t\tvirtio_crypto_vring_start(dev->data->queue_pairs[i]);\n+\t\tVIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]);\n+\t}\n+}\n+\n+/* vring size of data queue is 1024 */\n+#define VIRTIO_MBUF_BURST_SZ 1024\n+\n+uint16_t\n+virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tstruct virtqueue *txvq = tx_queue;\n+\tuint16_t nb_used, num, nb_rx;\n+\n+\tnb_used = VIRTQUEUE_NUSED(txvq);\n+\n+\tvirtio_rmb();\n+\n+\tnum = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);\n+\tnum = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ)\n+\t\t? num : VIRTIO_MBUF_BURST_SZ);\n+\n+\tif (num == 0)\n+\t\treturn 0;\n+\n+\tnb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num);\n+\tPMD_RX_LOG(DEBUG, \"used:%d dequeue:%d\", nb_used, num);\n+\n+\treturn nb_rx;\n+}\n+\n+uint16_t\n+virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tstruct virtqueue *txvq;\n+\tuint16_t nb_tx;\n+\tint error;\n+\n+\tif (unlikely(nb_pkts < 1))\n+\t\treturn nb_pkts;\n+\tif (unlikely(tx_queue == NULL)) {\n+\t\tPMD_TX_LOG(ERR, \"tx_queue is NULL\");\n+\t\treturn 0;\n+\t}\n+\ttxvq = tx_queue;\n+\n+\tPMD_TX_LOG(DEBUG, \"%d packets to xmit\", nb_pkts);\n+\n+\tfor (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {\n+\t\tstruct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;\n+\t\t/* nb_segs is always 1 in the virtio crypto case */\n+\t\tint need = txm->nb_segs - txvq->vq_free_cnt;\n+\n+\t\t/*\n+\t\t * A positive value indicates there is not enough space in\n+\t\t * the vring descriptors\n+\t\t */\n+\t\tif (unlikely(need > 0)) {\n+\t\t\t/*\n+\t\t\t * try again because the receive process may have\n+\t\t\t * freed some space\n+\t\t\t */\n+\t\t\tneed = txm->nb_segs - txvq->vq_free_cnt;\n+\t\t\tif (unlikely(need > 0)) {\n+\t\t\t\tPMD_TX_LOG(ERR, \"No free tx descriptors \"\n+\t\t\t\t\t\t\"to transmit\");\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\n+\t\ttxvq->packets_sent_total++;\n+\n+\t\t/* Enqueue Packet buffers */\n+\t\terror = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]);\n+\t\tif (unlikely(error)) {\n+\t\t\tif (error == -ENOSPC)\n+\t\t\t\tPMD_TX_LOG(ERR,\n+\t\t\t\t\t\"virtqueue_enqueue Free count = 0\");\n+\t\t\telse if (error == -EMSGSIZE)\n+\t\t\t\tPMD_TX_LOG(ERR,\n+\t\t\t\t\t\"virtqueue_enqueue Free count < 1\");\n+\t\t\telse\n+\t\t\t\tPMD_TX_LOG(ERR,\n+\t\t\t\t\t\"virtqueue_enqueue error: %d\", error);\n+\t\t\ttxvq->packets_sent_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tif (likely(nb_tx)) {\n+\t\tvq_update_avail_idx(txvq);\n+\n+\t\tif (unlikely(virtqueue_kick_prepare(txvq))) {\n+\t\t\tvirtqueue_notify(txvq);\n+\t\t\tPMD_TX_LOG(DEBUG, \"Notified backend after xmit\");\n+\t\t}\n+\t}\n+\n+\treturn nb_tx;\n+}\n",
    "prefixes": [
        "dpdk-dev",
        "v3",
        "3/7"
    ]
}