From patchwork Mon Sep 28 10:59:15 2020
X-Patchwork-Submitter: Ferruh Yigit
X-Patchwork-Id: 79008
X-Patchwork-Delegate: thomas@monjalon.net
From: Ferruh Yigit
To: Maxime Coquelin, Chenbo Xia, Zhihong Wang, Jay Zhou, Fan Zhang
Cc: dev@dpdk.org, Ferruh Yigit, stable@dpdk.org
Date: Mon, 28 Sep 2020 11:59:15 +0100
Message-Id: <20200928105918.740807-3-ferruh.yigit@intel.com>
In-Reply-To: <20200928105918.740807-1-ferruh.yigit@intel.com>
References: <20200928105918.740807-1-ferruh.yigit@intel.com>
Subject: [dpdk-dev] [PATCH 3/6] vhost/crypto: fix missed request check for copy mode

From: Fan Zhang

This patch adds the request checks that were missing from the vhost crypto
copy mode: guest-supplied request lengths are now validated before any data
is copied into host mbufs.
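For reference, the new checks amount to rejecting any request whose
guest-supplied lengths do not fit the host-side copy buffers. Below is a
minimal, illustrative sketch of that idea; the struct, constants and
function name are simplified stand-ins rather than the actual DPDK
definitions (the real checks are added in the diff as
vhost_crypto_check_cipher_request() and vhost_crypto_check_chain_request()).

    /*
     * Illustrative only, not part of the patch: validate guest-controlled
     * lengths against the host copy-buffer size before copying anything.
     */
    #include <stdint.h>

    #define MAX_IV_LEN    16u    /* stand-in for VHOST_CRYPTO_MAX_IV_LEN */
    #define COPY_BUF_LEN  2048u  /* stand-in for RTE_MBUF_DEFAULT_BUF_SIZE */

    struct cipher_para {
            uint32_t iv_len;
            uint32_t src_data_len;
            uint32_t dst_data_len;
    };

    /* Return 0 when the request fits the copy buffers, -1 otherwise. */
    static int
    check_cipher_request(const struct cipher_para *p)
    {
            if (p->iv_len > MAX_IV_LEN)
                    return -1;
            if (p->src_data_len > COPY_BUF_LEN)
                    return -1;
            if (p->dst_data_len < p->src_data_len ||
                p->dst_data_len > COPY_BUF_LEN)
                    return -1;
            return 0;
    }

Only when such a check passes does the handler go on to build the crypto op
and copy the request data; a malformed request is rejected as
VIRTIO_CRYPTO_BADMSG through the error_exit path instead.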
CVE-2020-14376
CVE-2020-14377

Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")
Cc: stable@dpdk.org

Signed-off-by: Fan Zhang
Acked-by: Chenbo Xia
---
 lib/librte_vhost/vhost_crypto.c | 68 +++++++++++++++++++++++----------
 1 file changed, 47 insertions(+), 21 deletions(-)

diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index 86747dd5f3..494f49084b 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -756,7 +756,7 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 		}
 
 		wb_data->dst = dst;
-		wb_data->len = desc->len - offset;
+		wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
 		write_back_len -= wb_data->len;
 		src += offset + wb_data->len;
 		offset = 0;
@@ -840,6 +840,17 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 	return NULL;
 }
 
+static __rte_always_inline uint8_t
+vhost_crypto_check_cipher_request(struct virtio_crypto_cipher_data_req *req)
+{
+	if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
+		(req->para.src_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE) &&
+		(req->para.dst_data_len >= req->para.src_data_len) &&
+		(req->para.dst_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE)))
+		return VIRTIO_CRYPTO_OK;
+	return VIRTIO_CRYPTO_BADMSG;
+}
+
 static uint8_t
 prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct vhost_crypto_data_req *vc_req,
@@ -851,7 +862,10 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	struct vhost_crypto_writeback_data *ewb = NULL;
 	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
 	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
-	uint8_t ret = 0;
+	uint8_t ret = vhost_crypto_check_cipher_request(cipher);
+
+	if (unlikely(ret != VIRTIO_CRYPTO_OK))
+		goto error_exit;
 
 	/* prepare */
 	/* iv */
@@ -861,10 +875,9 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		goto error_exit;
 	}
 
-	m_src->data_len = cipher->para.src_data_len;
-
 	switch (vcrypto->option) {
 	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+		m_src->data_len = cipher->para.src_data_len;
 		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
 				cipher->para.src_data_len);
 		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
@@ -886,13 +899,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		break;
 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
 		vc_req->wb_pool = vcrypto->wb_pool;
-
-		if (unlikely(cipher->para.src_data_len >
-				RTE_MBUF_DEFAULT_BUF_SIZE)) {
-			VC_LOG_ERR("Not enough space to do data copy");
-			ret = VIRTIO_CRYPTO_ERR;
-			goto error_exit;
-		}
+		m_src->data_len = cipher->para.src_data_len;
 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
 				vc_req, &desc, cipher->para.src_data_len,
 				nb_descs, vq_size) < 0)) {
@@ -975,6 +982,29 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	return ret;
 }
 
+static __rte_always_inline uint8_t
+vhost_crypto_check_chain_request(struct virtio_crypto_alg_chain_data_req *req)
+{
+	if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
+		(req->para.src_data_len <= RTE_MBUF_DEFAULT_DATAROOM) &&
+		(req->para.dst_data_len >= req->para.src_data_len) &&
+		(req->para.dst_data_len <= RTE_MBUF_DEFAULT_DATAROOM) &&
+		(req->para.cipher_start_src_offset <
+			RTE_MBUF_DEFAULT_DATAROOM) &&
+		(req->para.len_to_cipher < RTE_MBUF_DEFAULT_DATAROOM) &&
+		(req->para.hash_start_src_offset <
+			RTE_MBUF_DEFAULT_DATAROOM) &&
+		(req->para.len_to_hash < RTE_MBUF_DEFAULT_DATAROOM) &&
+		(req->para.cipher_start_src_offset + req->para.len_to_cipher <=
+			req->para.src_data_len) &&
+		(req->para.hash_start_src_offset + req->para.len_to_hash <=
+			req->para.src_data_len) &&
+		(req->para.dst_data_len + req->para.hash_result_len <=
+			RTE_MBUF_DEFAULT_DATAROOM)))
+		return VIRTIO_CRYPTO_OK;
+	return VIRTIO_CRYPTO_BADMSG;
+}
+
 static uint8_t
 prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct vhost_crypto_data_req *vc_req,
@@ -988,7 +1018,10 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
 	uint32_t digest_offset;
 	void *digest_addr;
-	uint8_t ret = 0;
+	uint8_t ret = vhost_crypto_check_chain_request(chain);
+
+	if (unlikely(ret != VIRTIO_CRYPTO_OK))
+		goto error_exit;
 
 	/* prepare */
 	/* iv */
@@ -998,10 +1031,9 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		goto error_exit;
 	}
 
-	m_src->data_len = chain->para.src_data_len;
-
 	switch (vcrypto->option) {
 	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+		m_src->data_len = chain->para.src_data_len;
 		m_dst->data_len = chain->para.dst_data_len;
 
 		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
@@ -1023,13 +1055,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		break;
 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
 		vc_req->wb_pool = vcrypto->wb_pool;
-
-		if (unlikely(chain->para.src_data_len >
-				RTE_MBUF_DEFAULT_BUF_SIZE)) {
-			VC_LOG_ERR("Not enough space to do data copy");
-			ret = VIRTIO_CRYPTO_ERR;
-			goto error_exit;
-		}
+		m_src->data_len = chain->para.src_data_len;
 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
 				vc_req, &desc, chain->para.src_data_len,
 				nb_descs, vq_size) < 0)) {