From patchwork Fri Jan 4 11:22:45 2019
X-Patchwork-Submitter: Fan Zhang
X-Patchwork-Id: 49444
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Fan Zhang
To: dev@dpdk.org
Cc: maxime.coquelin@redhat.com, tiwei.bie@intel.com, stable@dpdk.org
Date: Fri, 4 Jan 2019 11:22:45 +0000
Message-Id: <20190104112246.9338-2-roy.fan.zhang@intel.com>
In-Reply-To: <20190104112246.9338-1-roy.fan.zhang@intel.com>
References: <20190104112246.9338-1-roy.fan.zhang@intel.com>
Subject: [dpdk-dev] [PATCH 1/2] vhost/crypto: fix possible dead loop

This patch fixes a possible infinite loop caused by an incorrect
descriptor chain created by the driver.
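[Editor's note: the following standalone sketch is not part of the patch.
It illustrates the bug class being fixed: the next fields of a
guest-supplied descriptor chain can form a cycle, so an unbounded chain
walk never terminates. Capping the walk at the ring size, which is the
role of the nb_descs counter introduced below, guarantees termination.
The struct mirrors the split virtqueue descriptor layout;
walk_chain_bounded() and the ring contents are hypothetical.]

#include <stdint.h>
#include <stdio.h>

#define VRING_DESC_F_NEXT 1

struct vring_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

/* Walk a descriptor chain, visiting at most vq_size descriptors.
 * Returns the chain length, or -1 if the chain has more links than the
 * ring has entries -- which can only happen when the chain loops. */
static int
walk_chain_bounded(struct vring_desc *head, uint16_t idx, uint32_t vq_size)
{
	uint32_t nb_descs = vq_size;	/* a valid chain never exceeds this */
	struct vring_desc *desc = &head[idx];
	int visited = 1;

	while (desc->flags & VRING_DESC_F_NEXT) {
		if (nb_descs == 0)
			return -1;	/* budget exhausted: the chain loops */
		nb_descs--;
		desc = &head[desc->next];
		visited++;
	}

	return visited;
}

int
main(void)
{
	/* A buggy or malicious driver links descriptor 1 back to 0. */
	struct vring_desc ring[4] = {
		[0] = { .flags = VRING_DESC_F_NEXT, .next = 1 },
		[1] = { .flags = VRING_DESC_F_NEXT, .next = 0 },	/* cycle */
	};

	/* An unbounded walk would never return; this prints -1. */
	printf("chain length: %d\n", walk_chain_bounded(ring, 0, 4));

	return 0;
}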
Cc: stable@dpdk.org
Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")

Signed-off-by: Fan Zhang
Reviewed-by: Maxime Coquelin
---
 lib/librte_vhost/vhost_crypto.c | 121 +++++++++++++++++++++++++++-------------
 1 file changed, 82 insertions(+), 39 deletions(-)

diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index dd01afc08..80b83ef77 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -466,12 +466,17 @@ vhost_crypto_msg_post_handler(int vid, void *msg)
 }
 
 static __rte_always_inline struct vring_desc *
-find_write_desc(struct vring_desc *head, struct vring_desc *desc)
+find_write_desc(struct vring_desc *head, struct vring_desc *desc,
+		uint32_t *nb_descs)
 {
 	if (desc->flags & VRING_DESC_F_WRITE)
 		return desc;
 
 	while (desc->flags & VRING_DESC_F_NEXT) {
+		if (unlikely(*nb_descs == 0))
+			return NULL;
+		(*nb_descs)--;
+
 		desc = &head[desc->next];
 		if (desc->flags & VRING_DESC_F_WRITE)
 			return desc;
@@ -481,13 +486,18 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
 }
 
 static struct virtio_crypto_inhdr *
-reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
+reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
+		uint32_t *nb_descs)
 {
 	uint64_t dlen;
 	struct virtio_crypto_inhdr *inhdr;
 
-	while (desc->flags & VRING_DESC_F_NEXT)
+	while (desc->flags & VRING_DESC_F_NEXT) {
+		if (unlikely(*nb_descs == 0))
+			return NULL;
+		(*nb_descs)--;
 		desc = &vc_req->head[desc->next];
+	}
 
 	dlen = desc->len;
 	inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, desc->addr,
@@ -500,15 +510,16 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
 
 static __rte_always_inline int
 move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
-		uint32_t size)
+		uint32_t size, uint32_t *nb_descs)
 {
 	struct vring_desc *desc = *cur_desc;
-	int left = size;
-
-	rte_prefetch0(&head[desc->next]);
-	left -= desc->len;
+	int left = size - desc->len;
 
 	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+		(*nb_descs)--;
+		if (unlikely(*nb_descs == 0))
+			return -1;
+
 		desc = &head[desc->next];
 		rte_prefetch0(&head[desc->next]);
 		left -= desc->len;
@@ -517,7 +528,10 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
 	if (unlikely(left > 0))
 		return -1;
 
-	*cur_desc = &head[desc->next];
+	if (unlikely(*nb_descs == 0))
+		*cur_desc = NULL;
+	else
+		*cur_desc = &head[desc->next];
 
 	return 0;
 }
@@ -539,7 +553,7 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
 
 static int
 copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
-		struct vring_desc **cur_desc, uint32_t size)
+		struct vring_desc **cur_desc, uint32_t size, uint32_t *nb_descs)
 {
 	struct vring_desc *desc = *cur_desc;
 	uint64_t remain, addr, dlen, len;
@@ -548,7 +562,6 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 	uint8_t *src;
 	int left = size;
 
-	rte_prefetch0(&vc_req->head[desc->next]);
 	to_copy = RTE_MIN(desc->len, (uint32_t)left);
 	dlen = to_copy;
 	src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
@@ -582,6 +595,12 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 	left -= to_copy;
 
 	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+		if (unlikely(*nb_descs == 0)) {
+			VC_LOG_ERR("Invalid descriptors");
+			return -1;
+		}
+		(*nb_descs)--;
+
 		desc = &vc_req->head[desc->next];
 		rte_prefetch0(&vc_req->head[desc->next]);
 		to_copy = RTE_MIN(desc->len, (uint32_t)left);
@@ -624,7 +643,10 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 		return -1;
 	}
 
-	*cur_desc = &vc_req->head[desc->next];
+	if (unlikely(*nb_descs == 0))
+		*cur_desc = NULL;
+	else
+		*cur_desc = &vc_req->head[desc->next];
 
 	return 0;
 }
@@ -684,7 +706,8 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 		struct vhost_crypto_writeback_data **end_wb_data,
 		uint8_t *src,
 		uint32_t offset,
-		uint64_t write_back_len)
+		uint64_t write_back_len,
+		uint32_t *nb_descs)
 {
 	struct vhost_crypto_writeback_data *wb_data, *head;
 	struct vring_desc *desc = *cur_desc;
@@ -731,6 +754,12 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 		offset -= desc->len;
 
 	while (write_back_len) {
+		if (unlikely(*nb_descs == 0)) {
+			VC_LOG_ERR("Invalid descriptors");
+			goto error_exit;
+		}
+		(*nb_descs)--;
+
 		desc = &vc_req->head[desc->next];
 		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
 			VC_LOG_ERR("incorrect descriptor");
@@ -770,7 +799,10 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 		wb_data->next = NULL;
 	}
 
-	*cur_desc = &vc_req->head[desc->next];
+	if (unlikely(*nb_descs == 0))
+		*cur_desc = NULL;
+	else
+		*cur_desc = &vc_req->head[desc->next];
 
 	*end_wb_data = wb_data;
 
@@ -787,7 +819,8 @@ static uint8_t
 prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct vhost_crypto_data_req *vc_req,
 		struct virtio_crypto_cipher_data_req *cipher,
-		struct vring_desc *cur_desc)
+		struct vring_desc *cur_desc,
+		uint32_t *nb_descs)
 {
 	struct vring_desc *desc = cur_desc;
 	struct vhost_crypto_writeback_data *ewb = NULL;
@@ -797,8 +830,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 	/* prepare */
 	/* iv */
-	if (unlikely(copy_data(iv_data, vc_req, &desc,
-			cipher->para.iv_len) < 0)) {
+	if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
+			nb_descs) < 0)) {
 		ret = VIRTIO_CRYPTO_BADMSG;
 		goto error_exit;
 	}
@@ -818,7 +851,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				cipher->para.src_data_len) < 0)) {
+				cipher->para.src_data_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
@@ -835,8 +868,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 			goto error_exit;
 		}
 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
-				vc_req, &desc, cipher->para.src_data_len)
-				< 0)) {
+				vc_req, &desc, cipher->para.src_data_len,
+				nb_descs) < 0)) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
 		}
@@ -847,7 +880,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	}
 
 	/* dst */
-	desc = find_write_desc(vc_req->head, desc);
+	desc = find_write_desc(vc_req->head, desc, nb_descs);
 	if (unlikely(!desc)) {
 		VC_LOG_ERR("Cannot find write location");
 		ret = VIRTIO_CRYPTO_BADMSG;
@@ -866,7 +899,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				cipher->para.dst_data_len) < 0)) {
+				cipher->para.dst_data_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
@@ -877,7 +910,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
 		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
 				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
-				cipher->para.dst_data_len);
+				cipher->para.dst_data_len, nb_descs);
 		if (unlikely(vc_req->wb == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
@@ -919,7 +952,8 @@ static uint8_t
 prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct vhost_crypto_data_req *vc_req,
 		struct virtio_crypto_alg_chain_data_req *chain,
-		struct vring_desc *cur_desc)
+		struct vring_desc *cur_desc,
+		uint32_t *nb_descs)
 {
 	struct vring_desc *desc = cur_desc, *digest_desc;
 	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
@@ -932,7 +966,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	/* prepare */
 	/* iv */
 	if (unlikely(copy_data(iv_data, vc_req, &desc,
-			chain->para.iv_len) < 0)) {
+			chain->para.iv_len, nb_descs) < 0)) {
 		ret = VIRTIO_CRYPTO_BADMSG;
 		goto error_exit;
 	}
@@ -953,7 +987,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.src_data_len) < 0)) {
+				chain->para.src_data_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
@@ -969,7 +1003,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 			goto error_exit;
 		}
 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
-				vc_req, &desc, chain->para.src_data_len)) < 0) {
+				vc_req, &desc, chain->para.src_data_len,
+				nb_descs)) < 0) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
 		}
@@ -981,7 +1016,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	}
 
 	/* dst */
-	desc = find_write_desc(vc_req->head, desc);
+	desc = find_write_desc(vc_req->head, desc, nb_descs);
 	if (unlikely(!desc)) {
 		VC_LOG_ERR("Cannot find write location");
 		ret = VIRTIO_CRYPTO_BADMSG;
@@ -1000,7 +1035,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.dst_data_len) < 0)) {
+				chain->para.dst_data_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
@@ -1017,7 +1052,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.hash_result_len) < 0)) {
+				chain->para.hash_result_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
@@ -1029,7 +1064,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 				rte_pktmbuf_mtod(m_src, uint8_t *),
 				chain->para.cipher_start_src_offset,
 				chain->para.dst_data_len -
-				chain->para.cipher_start_src_offset);
+				chain->para.cipher_start_src_offset, nb_descs);
 		if (unlikely(vc_req->wb == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
@@ -1042,14 +1077,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		/** create a wb_data for digest */
 		ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
-				digest_addr, 0, chain->para.hash_result_len);
+				digest_addr, 0, chain->para.hash_result_len,
+				nb_descs);
 		if (unlikely(ewb->next == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
 		}
 
 		if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
-				chain->para.hash_result_len)) < 0) {
+				chain->para.hash_result_len, nb_descs)) < 0) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
 		}
@@ -1108,6 +1144,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 	struct vring_desc *desc = NULL;
 	uint64_t session_id;
 	uint64_t dlen;
+	uint32_t nb_descs = vq->size;
 	int err = 0;
 
 	vc_req->desc_idx = desc_idx;
@@ -1116,6 +1153,10 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 
 	if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
 		dlen = head->len;
+		nb_descs = dlen / sizeof(struct vring_desc);
+		/* drop invalid descriptors */
+		if (unlikely(nb_descs > vq->size))
+			return -1;
 		desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
 				&dlen, VHOST_ACCESS_RO);
 		if (unlikely(!desc || dlen != head->len))
@@ -1138,8 +1179,8 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 		goto error_exit;
 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
 		req = &tmp_req;
-		if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req))
-				< 0)) {
+		if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
+				&nb_descs) < 0)) {
 			err = VIRTIO_CRYPTO_BADMSG;
 			VC_LOG_ERR("Invalid descriptor");
 			goto error_exit;
@@ -1152,7 +1193,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 		}
 	} else {
 		if (unlikely(move_desc(vc_req->head, &desc,
-				sizeof(*req)) < 0)) {
+				sizeof(*req), &nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			goto error_exit;
 		}
@@ -1193,11 +1234,13 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 			break;
 		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
 			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
-					&req->u.sym_req.u.cipher, desc);
+					&req->u.sym_req.u.cipher, desc,
+					&nb_descs);
 			break;
 		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
 			err = prepare_sym_chain_op(vcrypto, op, vc_req,
-					&req->u.sym_req.u.chain, desc);
+					&req->u.sym_req.u.chain, desc,
+					&nb_descs);
 			break;
 		}
 		if (unlikely(err != 0)) {
@@ -1215,7 +1258,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 
 error_exit:
 
-	inhdr = reach_inhdr(vc_req, desc);
+	inhdr = reach_inhdr(vc_req, desc, &nb_descs);
 	if (likely(inhdr != NULL))
 		inhdr->status = (uint8_t)err;

From patchwork Fri Jan 4 11:22:46 2019
X-Patchwork-Submitter: Fan Zhang
X-Patchwork-Id: 49445
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Fan Zhang
To: dev@dpdk.org
Cc: maxime.coquelin@redhat.com, tiwei.bie@intel.com, stable@dpdk.org
Date: Fri, 4 Jan 2019 11:22:46 +0000
Message-Id: <20190104112246.9338-3-roy.fan.zhang@intel.com>
In-Reply-To: <20190104112246.9338-1-roy.fan.zhang@intel.com>
References: <20190104112246.9338-1-roy.fan.zhang@intel.com>
Subject: [dpdk-dev] [PATCH 2/2] vhost/crypto: fix possible out of bound access

This patch fixes a possible out-of-bound access in vhost crypto.
Previously, an incorrect next descriptor index could cause the library
to read invalid memory content and crash the application.
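[Editor's note: another standalone sketch, not part of the patch.
Bounding the number of hops (patch 1/2) is not sufficient on its own:
desc->next is a guest-controlled 16-bit index, and using it unchecked as
an array index can read far past the end of a smaller ring. This patch
therefore also rejects any next index >= vq_size before dereferencing
it. The struct mirrors the split virtqueue layout; next_desc_unchecked()
and next_desc_checked() are hypothetical helpers.]

#include <stdint.h>
#include <stdio.h>

#define VRING_DESC_F_NEXT 1

struct vring_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

/* Unsafe: desc->next is guest-controlled, so head[desc->next] without
 * validation can read up to 65535 entries past a ring that may only
 * hold, say, 4. */
struct vring_desc *
next_desc_unchecked(struct vring_desc *head, struct vring_desc *desc)
{
	return &head[desc->next];	/* out-of-bound read if next >= ring size */
}

/* Safe: refuse any next index outside the ring before dereferencing,
 * mirroring the "desc->next >= vq_size" checks added by this patch. */
struct vring_desc *
next_desc_checked(struct vring_desc *head, struct vring_desc *desc,
		uint32_t vq_size)
{
	if (desc->next >= vq_size)
		return NULL;	/* malformed chain: caller fails the request */
	return &head[desc->next];
}

int
main(void)
{
	struct vring_desc ring[4] = {
		[0] = { .flags = VRING_DESC_F_NEXT, .next = 1000 },	/* bogus */
	};

	/* next_desc_unchecked(ring, &ring[0]) would read far outside the
	 * ring; the checked variant returns NULL instead. */
	printf("checked: %p\n", (void *)next_desc_checked(ring, &ring[0], 4));

	return 0;
}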
Cc: stable@dpdk.org
Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")

Signed-off-by: Fan Zhang
Reviewed-by: Maxime Coquelin
---
 lib/librte_vhost/vhost_crypto.c | 89 ++++++++++++++++++++++++-----------------
 1 file changed, 53 insertions(+), 36 deletions(-)

diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index 80b83ef77..0694c0a74 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -467,13 +467,13 @@ vhost_crypto_msg_post_handler(int vid, void *msg)
 
 static __rte_always_inline struct vring_desc *
 find_write_desc(struct vring_desc *head, struct vring_desc *desc,
-		uint32_t *nb_descs)
+		uint32_t *nb_descs, uint32_t vq_size)
 {
 	if (desc->flags & VRING_DESC_F_WRITE)
 		return desc;
 
 	while (desc->flags & VRING_DESC_F_NEXT) {
-		if (unlikely(*nb_descs == 0))
+		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
 			return NULL;
 		(*nb_descs)--;
 
@@ -487,13 +487,13 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc,
 
 static struct virtio_crypto_inhdr *
 reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
-		uint32_t *nb_descs)
+		uint32_t *nb_descs, uint32_t vq_size)
 {
 	uint64_t dlen;
 	struct virtio_crypto_inhdr *inhdr;
 
 	while (desc->flags & VRING_DESC_F_NEXT) {
-		if (unlikely(*nb_descs == 0))
+		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
 			return NULL;
 		(*nb_descs)--;
 		desc = &vc_req->head[desc->next];
@@ -510,14 +510,14 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
 
 static __rte_always_inline int
 move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
-		uint32_t size, uint32_t *nb_descs)
+		uint32_t size, uint32_t *nb_descs, uint32_t vq_size)
 {
 	struct vring_desc *desc = *cur_desc;
 	int left = size - desc->len;
 
 	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
 		(*nb_descs)--;
-		if (unlikely(*nb_descs == 0))
+		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
 			return -1;
 
 		desc = &head[desc->next];
@@ -530,8 +530,12 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
 
 	if (unlikely(*nb_descs == 0))
 		*cur_desc = NULL;
-	else
+	else {
+		if (unlikely(desc->next >= vq_size))
+			return -1;
 		*cur_desc = &head[desc->next];
+	}
 
 	return 0;
 }
@@ -553,7 +557,8 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
 
 static int
 copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
-		struct vring_desc **cur_desc, uint32_t size, uint32_t *nb_descs)
+		struct vring_desc **cur_desc, uint32_t size,
+		uint32_t *nb_descs, uint32_t vq_size)
 {
 	struct vring_desc *desc = *cur_desc;
 	uint64_t remain, addr, dlen, len;
@@ -595,7 +600,7 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 	left -= to_copy;
 
 	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
-		if (unlikely(*nb_descs == 0)) {
+		if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
 			VC_LOG_ERR("Invalid descriptors");
 			return -1;
 		}
@@ -645,8 +650,11 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 
 	if (unlikely(*nb_descs == 0))
 		*cur_desc = NULL;
-	else
+	else {
+		if (unlikely(desc->next >= vq_size))
+			return -1;
 		*cur_desc = &vc_req->head[desc->next];
+	}
 
 	return 0;
 }
@@ -657,7 +665,6 @@ write_back_data(struct vhost_crypto_data_req *vc_req)
 	struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;
 
 	while (wb_data) {
-		rte_prefetch0(wb_data->next);
 		rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
 		wb_last = wb_data;
 		wb_data = wb_data->next;
@@ -707,7 +714,7 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 		uint8_t *src,
 		uint32_t offset,
 		uint64_t write_back_len,
-		uint32_t *nb_descs)
+		uint32_t *nb_descs, uint32_t vq_size)
 {
 	struct vhost_crypto_writeback_data *wb_data, *head;
 	struct vring_desc *desc = *cur_desc;
@@ -754,7 +761,7 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 		offset -= desc->len;
 
 	while (write_back_len) {
-		if (unlikely(*nb_descs == 0)) {
+		if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
 			VC_LOG_ERR("Invalid descriptors");
 			goto error_exit;
 		}
@@ -801,8 +808,11 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 
 	if (unlikely(*nb_descs == 0))
 		*cur_desc = NULL;
-	else
+	else {
+		if (unlikely(desc->next >= vq_size))
+			goto error_exit;
 		*cur_desc = &vc_req->head[desc->next];
+	}
 
 	*end_wb_data = wb_data;
 
@@ -820,7 +830,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct vhost_crypto_data_req *vc_req,
 		struct virtio_crypto_cipher_data_req *cipher,
 		struct vring_desc *cur_desc,
-		uint32_t *nb_descs)
+		uint32_t *nb_descs, uint32_t vq_size)
 {
 	struct vring_desc *desc = cur_desc;
 	struct vhost_crypto_writeback_data *ewb = NULL;
@@ -831,7 +841,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	/* prepare */
 	/* iv */
 	if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
-			nb_descs) < 0)) {
+			nb_descs, vq_size) < 0)) {
 		ret = VIRTIO_CRYPTO_BADMSG;
 		goto error_exit;
 	}
@@ -851,7 +861,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				cipher->para.src_data_len, nb_descs) < 0)) {
+				cipher->para.src_data_len, nb_descs,
+				vq_size) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
@@ -869,7 +880,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
 				vc_req, &desc, cipher->para.src_data_len,
-				nb_descs) < 0)) {
+				nb_descs, vq_size) < 0)) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
 		}
@@ -880,7 +891,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	}
 
 	/* dst */
-	desc = find_write_desc(vc_req->head, desc, nb_descs);
+	desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
 	if (unlikely(!desc)) {
 		VC_LOG_ERR("Cannot find write location");
 		ret = VIRTIO_CRYPTO_BADMSG;
@@ -899,7 +910,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				cipher->para.dst_data_len, nb_descs) < 0)) {
+				cipher->para.dst_data_len,
+				nb_descs, vq_size) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
@@ -910,7 +922,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
 		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
 				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
-				cipher->para.dst_data_len, nb_descs);
+				cipher->para.dst_data_len, nb_descs, vq_size);
 		if (unlikely(vc_req->wb == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
@@ -953,7 +965,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct vhost_crypto_data_req *vc_req,
 		struct virtio_crypto_alg_chain_data_req *chain,
 		struct vring_desc *cur_desc,
-		uint32_t *nb_descs)
+		uint32_t *nb_descs, uint32_t vq_size)
 {
 	struct vring_desc *desc = cur_desc, *digest_desc;
 	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
@@ -966,7 +978,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	/* prepare */
 	/* iv */
 	if (unlikely(copy_data(iv_data, vc_req, &desc,
-			chain->para.iv_len, nb_descs) < 0)) {
+			chain->para.iv_len, nb_descs, vq_size) < 0)) {
 		ret = VIRTIO_CRYPTO_BADMSG;
 		goto error_exit;
 	}
@@ -987,7 +999,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.src_data_len, nb_descs) < 0)) {
+				chain->para.src_data_len,
+				nb_descs, vq_size) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
@@ -1004,7 +1017,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
 				vc_req, &desc, chain->para.src_data_len,
-				nb_descs)) < 0) {
+				nb_descs, vq_size)) < 0) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
 		}
@@ -1016,7 +1029,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	}
 
 	/* dst */
-	desc = find_write_desc(vc_req->head, desc, nb_descs);
+	desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
 	if (unlikely(!desc)) {
 		VC_LOG_ERR("Cannot find write location");
 		ret = VIRTIO_CRYPTO_BADMSG;
@@ -1035,7 +1048,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.dst_data_len, nb_descs) < 0)) {
+				chain->para.dst_data_len,
+				nb_descs, vq_size) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
@@ -1052,7 +1066,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.hash_result_len, nb_descs) < 0)) {
+				chain->para.hash_result_len,
+				nb_descs, vq_size) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
@@ -1064,7 +1079,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 				rte_pktmbuf_mtod(m_src, uint8_t *),
 				chain->para.cipher_start_src_offset,
 				chain->para.dst_data_len -
-				chain->para.cipher_start_src_offset, nb_descs);
+				chain->para.cipher_start_src_offset,
+				nb_descs, vq_size);
 		if (unlikely(vc_req->wb == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
@@ -1078,14 +1094,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		/** create a wb_data for digest */
 		ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
 				digest_addr, 0, chain->para.hash_result_len,
-				nb_descs);
+				nb_descs, vq_size);
 		if (unlikely(ewb->next == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
 		}
 
 		if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
-				chain->para.hash_result_len, nb_descs)) < 0) {
+				chain->para.hash_result_len,
+				nb_descs, vq_size)) < 0) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
 		}
@@ -1180,7 +1197,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
 		req = &tmp_req;
 		if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
-				&nb_descs) < 0)) {
+				&nb_descs, vq->size) < 0)) {
 			err = VIRTIO_CRYPTO_BADMSG;
 			VC_LOG_ERR("Invalid descriptor");
 			goto error_exit;
@@ -1193,7 +1210,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 		}
 	} else {
 		if (unlikely(move_desc(vc_req->head, &desc,
-				sizeof(*req), &nb_descs) < 0)) {
+				sizeof(*req), &nb_descs, vq->size) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			goto error_exit;
 		}
@@ -1235,12 +1252,12 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
 			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
 					&req->u.sym_req.u.cipher, desc,
-					&nb_descs);
+					&nb_descs, vq->size);
 			break;
 		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
 			err = prepare_sym_chain_op(vcrypto, op, vc_req,
 					&req->u.sym_req.u.chain, desc,
-					&nb_descs);
+					&nb_descs, vq->size);
 			break;
 		}
 		if (unlikely(err != 0)) {
@@ -1258,7 +1275,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 
 error_exit:
 
-	inhdr = reach_inhdr(vc_req, desc, &nb_descs);
+	inhdr = reach_inhdr(vc_req, desc, &nb_descs, vq->size);
 	if (likely(inhdr != NULL))
 		inhdr->status = (uint8_t)err;