From patchwork Fri Jul  5 17:15:51 2019
X-Patchwork-Submitter: Fiona Trahe
X-Patchwork-Id: 56170
X-Patchwork-Delegate: gakhil@marvell.com
From: Fiona Trahe
To: dev@dpdk.org
Cc: akhil.goyal@nxp.com, arturx.trybula@intel.com, tjozwiakgm@gmail.com,
 fiona.trahe@intel.com
Date: Fri, 5 Jul 2019 18:15:51 +0100
Message-Id: <1562346951-18190-1-git-send-email-fiona.trahe@intel.com>
In-Reply-To: <20190521150941.12689-1-adamx.dybkowski@intel.com>
References: <20190521150941.12689-1-adamx.dybkowski@intel.com>
Subject: [dpdk-dev] [PATCH v2] compress/qat: fixed overflow status return from qat pmd

From: Tomasz Jozwiak

This patch fixes the fail status returned from the compression PMD
when the destination buffer size is not big enough to store all of
the data.
Fixes: 3dc9ef2d23fe ("compress/qat: fix returned status on overflow")
Cc: stable@dpdk.org

Signed-off-by: Tomasz Jozwiak
Signed-off-by: Adam Dybkowski
Acked-by: Adam Dybkowski
---
 drivers/common/qat/qat_qp.c         |  4 +++-
 drivers/common/qat/qat_qp.h         |  3 ++-
 drivers/compress/qat/qat_comp.c     | 28 +++++++++++++++++++++++++++-
 drivers/compress/qat/qat_comp.h     |  5 ++++-
 drivers/compress/qat/qat_comp_pmd.c |  2 ++
 5 files changed, 38 insertions(+), 4 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 131215296..03f11f869 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -651,7 +651,8 @@ qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 			qat_sym_process_response(ops, resp_msg);
 		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
 			qat_comp_process_response(ops, resp_msg,
-				&tmp_qp->stats.dequeue_err_count);
+				tmp_qp->op_cookies[head / rx_queue->msg_size],
+				&tmp_qp->stats.dequeue_err_count);
 		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
 #ifdef BUILD_QAT_ASYM
 			qat_asym_process_response(ops, resp_msg,
@@ -686,6 +687,7 @@ qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 __rte_weak int
 qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
+			  void *op_cookie __rte_unused,
 			  uint64_t *dequeue_err_count __rte_unused)
 {
 	return 0;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 9833bcbd8..980c2ba32 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -108,6 +108,7 @@ qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
 /* Needed for weak function*/
 int
 qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
-			  uint64_t *dequeue_err_count);
+			  void *op_cookie __rte_unused,
+			  uint64_t *dequeue_err_count __rte_unused);
 
 #endif /* _QAT_QP_H_ */
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index dd0fe1b34..835aaa838 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -170,6 +170,18 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 			rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
 	}
 
+	if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
+		/* QAT doesn't support dest. buffer lower
+		 * than QAT_MIN_OUT_BUF_SIZE. Propagate error mark
+		 * by converting this request to the null one
+		 * and check the status in the response.
+		 */
+		QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with larger buffer");
+		comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
+		comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
+		cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+	}
+
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
 	QAT_DP_LOG(DEBUG, "Direction: %s",
 	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
@@ -181,10 +193,13 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 }
 
 int
-qat_comp_process_response(void **op, uint8_t *resp, uint64_t *dequeue_err_count)
+qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
+			  uint64_t *dequeue_err_count)
 {
 	struct icp_qat_fw_comp_resp *resp_msg =
 			(struct icp_qat_fw_comp_resp *)resp;
+	struct qat_comp_op_cookie *cookie =
+			(struct qat_comp_op_cookie *)op_cookie;
 	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
 			(resp_msg->opaque_data);
 	struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
@@ -201,6 +216,17 @@ qat_comp_process_response(void **op, uint8_t *resp, uint64_t *dequeue_err_count)
 			sizeof(struct icp_qat_fw_comp_resp));
 #endif
 
+	if (unlikely(cookie->error)) {
+		rx_op->status = cookie->error;
+		cookie->error = 0;
+		++(*dequeue_err_count);
+		rx_op->debug_status = 0;
+		rx_op->consumed = 0;
+		rx_op->produced = 0;
+		*op = (void *)rx_op;
+		return 0;
+	}
+
 	if (likely(qat_xform->qat_comp_request_type
 			!= QAT_COMP_REQUEST_DECOMPRESS)) {
 		if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
index 1312ee93b..61d12ecf4 100644
--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h
@@ -24,6 +24,8 @@
 /* fallback to fixed compression threshold */
 #define QAT_FALLBACK_THLD ((uint32_t)(RTE_PMD_QAT_COMP_IM_BUFFER_SIZE / 1.1))
 
+#define QAT_MIN_OUT_BUF_SIZE 46
+
 enum qat_comp_request_type {
 	QAT_COMP_REQUEST_FIXED_COMP_STATELESS,
 	QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS,
@@ -45,6 +47,7 @@ struct qat_comp_op_cookie {
 	phys_addr_t qat_sgl_src_phys_addr;
 	phys_addr_t qat_sgl_dst_phys_addr;
 	/* dynamically created SGLs */
+	uint8_t error;
 	uint8_t socket_id;
 	uint16_t src_nb_elems;
 	uint16_t dst_nb_elems;
@@ -63,7 +66,7 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie,
 		enum qat_device_gen qat_dev_gen __rte_unused);
 int
-qat_comp_process_response(void **op, uint8_t *resp,
+qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
 		uint64_t *dequeue_err_count);
 
 int
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 1d9d72e8f..072647217 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -168,6 +168,8 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 				QAT_PMD_COMP_SGL_DEF_SEGMENTS;
 
 		cookie->socket_id = dev->data->socket_id;
+
+		cookie->error = 0;
 	}
 
 	return ret;
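For reviewers who want to see the effect from the application side, here is a
minimal sketch (not part of the patch) of how the propagated status is
observed. It assumes compressdev device 0 and queue pair 0 are already
configured and that `op` is a prepared struct rte_comp_op whose m_dst mbuf may
be undersized; the function name handle_overflow_example is illustrative only.

/* Sketch: detect the OUT_OF_SPACE_TERMINATED status this fix propagates. */
#include <rte_comp.h>
#include <rte_compressdev.h>

static void
handle_overflow_example(struct rte_comp_op *op)
{
	struct rte_comp_op *deq_op;
	uint16_t nb;

	/* Submit the operation; the destination mbuf may be too small. */
	nb = rte_compressdev_enqueue_burst(0, 0, &op, 1);
	if (nb == 0)
		return;

	/* Poll until the operation comes back from the PMD. */
	do {
		nb = rte_compressdev_dequeue_burst(0, 0, &deq_op, 1);
	} while (nb == 0);

	if (deq_op->status == RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
		/* With this fix, consumed and produced are 0: nothing was
		 * written, so the request should be resent with a larger
		 * destination buffer instead of being treated as success.
		 */
	}
}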