From patchwork Sun Jul 5 09:23:46 2020
X-Patchwork-Submitter: Ori Kam
X-Patchwork-Id: 73131
X-Patchwork-Delegate: thomas@monjalon.net
From: Ori Kam
To: jerinj@marvell.com, xiang.w.wang@intel.com, matan@mellanox.com,
 viacheslavo@mellanox.com
Cc: guyk@marvell.com, dev@dpdk.org, pbhagavatula@marvell.com,
 shahafs@mellanox.com, hemant.agrawal@nxp.com, opher@mellanox.com,
 alexr@mellanox.com, dovrat@marvell.com, pkapoor@marvell.com,
 nipun.gupta@nxp.com, bruce.richardson@intel.com, yang.a.hong@intel.com,
 harry.chang@intel.com, gu.jian1@zte.com.cn, shanjiangh@chinatelecom.cn,
 zhangy.yun@chinatelecom.cn, lixingfu@huachentel.com, wushuai@inspur.com,
 yuyingxia@yxlink.com, fanchenggang@sunyainfo.com, davidfgao@tencent.com,
 liuzhong1@chinaunicom.cn, zhaoyong11@huawei.com, oc@yunify.com,
 jim@netgate.com, hongjun.ni@intel.com, deri@ntop.org, fc@napatech.com,
 arthur.su@lionic.com, thomas@monjalon.net, orika@mellanox.com,
 rasland@mellanox.com, Yuval Avnery
Date: Sun, 5 Jul 2020 09:23:46 +0000
Message-Id: <1593941027-86651-20-git-send-email-orika@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1593941027-86651-1-git-send-email-orika@mellanox.com>
References: <1593941027-86651-1-git-send-email-orika@mellanox.com>
Subject: [dpdk-dev] [PATCH 19/20] regex/mlx5: implement dequeue function

From: Yuval Avnery

Implement dequeue function for the regex API.
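
Below, for context only (not part of the diff), is a minimal sketch of how
an application is expected to reach this callback through the generic
rte_regexdev_dequeue_burst() wrapper. The dev_id, qp_id and BURST values
and the pre-allocated ops[] array are illustrative assumptions standing in
for the usual regexdev configuration and enqueue path.

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_regexdev.h>

    #define BURST 32 /* illustrative burst size, not taken from this patch */

    /* Drain completed jobs from one queue pair and print their matches.
     * rte_regexdev_dequeue_burst() dispatches to the PMD dequeue op,
     * i.e. mlx5_regexdev_dequeue() once this patch is applied. */
    static void
    drain_qp(uint8_t dev_id, uint16_t qp_id, struct rte_regex_ops **ops)
    {
        uint16_t nb, i, m;

        nb = rte_regexdev_dequeue_burst(dev_id, qp_id, ops, BURST);
        for (i = 0; i < nb; i++)
            for (m = 0; m < ops[i]->nb_matches; m++)
                printf("job %" PRIu64 ": rule %u off %u len %u\n",
                       ops[i]->user_id,
                       (unsigned int)ops[i]->matches[m].rule_id,
                       (unsigned int)ops[i]->matches[m].start_offset,
                       (unsigned int)ops[i]->matches[m].len);
    }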
Signed-off-by: Yuval Avnery
Acked-by: Ori Kam
---
 drivers/regex/mlx5/mlx5_regex.c          |   1 +
 drivers/regex/mlx5/mlx5_regex.h          |   4 ++
 drivers/regex/mlx5/mlx5_regex_control.c  |   1 +
 drivers/regex/mlx5/mlx5_regex_fastpath.c | 100 +++++++++++++++++++++++++++++++
 4 files changed, 106 insertions(+)

diff --git a/drivers/regex/mlx5/mlx5_regex.c b/drivers/regex/mlx5/mlx5_regex.c
index 503bdf8..433ed17 100644
--- a/drivers/regex/mlx5/mlx5_regex.c
+++ b/drivers/regex/mlx5/mlx5_regex.c
@@ -180,6 +180,7 @@
 	}
 	priv->regexdev->dev_ops = &mlx5_regexdev_ops;
 	priv->regexdev->enqueue = mlx5_regexdev_enqueue;
+	priv->regexdev->dequeue = mlx5_regexdev_dequeue;
 	priv->regexdev->device = (struct rte_device *)pci_dev;
 	priv->regexdev->data->dev_private = priv;
 	priv->regexdev->state = RTE_REGEXDEV_READY;
diff --git a/drivers/regex/mlx5/mlx5_regex.h b/drivers/regex/mlx5/mlx5_regex.h
index e28c7d3..390d9d4 100644
--- a/drivers/regex/mlx5/mlx5_regex.h
+++ b/drivers/regex/mlx5/mlx5_regex.h
@@ -38,6 +38,7 @@ struct mlx5_regex_cq {
 	uint32_t dbr_umem; /* Door bell record umem id. */
 	volatile struct mlx5_cqe *cqe; /* The CQ ring buffer. */
 	struct mlx5dv_devx_umem *cqe_umem; /* CQ buffer umem. */
+	size_t ci;
 	uint32_t *dbr;
 };
 
@@ -96,4 +97,7 @@ int mlx5_regex_rules_db_import(struct rte_regexdev *dev,
 int mlx5_regexdev_setup_fastpath(struct mlx5_regex_priv *priv, uint32_t qp_id);
 uint16_t mlx5_regexdev_enqueue(struct rte_regexdev *dev, uint16_t qp_id,
 		       struct rte_regex_ops **ops, uint16_t nb_ops);
+uint16_t mlx5_regexdev_dequeue(struct rte_regexdev *dev, uint16_t qp_id,
+		       struct rte_regex_ops **ops, uint16_t nb_ops);
+
 #endif /* MLX5_REGEX_H */
diff --git a/drivers/regex/mlx5/mlx5_regex_control.c b/drivers/regex/mlx5/mlx5_regex_control.c
index c0ed0b5..d17a6b9 100644
--- a/drivers/regex/mlx5/mlx5_regex_control.c
+++ b/drivers/regex/mlx5/mlx5_regex_control.c
@@ -124,6 +124,7 @@
 	cq->cqe_umem = mlx5_glue->devx_umem_reg(priv->ctx, buf,
 						sizeof(struct mlx5_cqe) *
 						cq_size, 7);
+	cq->ci = 0;
 	if (!cq->cqe_umem) {
 		DRV_LOG(ERR, "Can't register cqe mem.");
 		rte_errno = ENOMEM;
diff --git a/drivers/regex/mlx5/mlx5_regex_fastpath.c b/drivers/regex/mlx5/mlx5_regex_fastpath.c
index 072bc57..d754e91 100644
--- a/drivers/regex/mlx5/mlx5_regex_fastpath.c
+++ b/drivers/regex/mlx5/mlx5_regex_fastpath.c
@@ -182,6 +182,106 @@ void mlx5_regex_set_ctrl_seg(void *seg,
 	return i;
 }
 
+#define MLX5_REGEX_RESP_SZ 8
+
+static inline void
+extract_result(struct rte_regex_ops *op, struct mlx5_regex_job *job)
+{
+	size_t j, offset;
+	op->user_id = job->user_id;
+	op->nb_matches = DEVX_GET(regexp_metadata, job->metadata + 32,
+				  match_count);
+	op->nb_actual_matches = DEVX_GET(regexp_metadata, job->metadata + 32,
+					 detected_match_count);
+	for (j = 0; j < op->nb_matches; j++) {
+		offset = MLX5_REGEX_RESP_SZ * j;
+		op->matches[j].rule_id =
+			DEVX_GET(regexp_match_tuple, (job->output + offset),
+				 rule_id);
+		op->matches[j].start_offset =
+			DEVX_GET(regexp_match_tuple, (job->output + offset),
+				 start_ptr);
+		op->matches[j].len =
+			DEVX_GET(regexp_match_tuple, (job->output + offset),
+				 length);
+	}
+}
+
+static inline volatile struct mlx5_cqe *
+poll_one(struct mlx5_regex_cq *cq)
+{
+	volatile struct mlx5_cqe *cqe;
+	size_t next_cqe_offset;
+
+	next_cqe_offset = (cq->ci % cq_size_get(cq)) * sizeof(*cqe);
+	cqe = (volatile struct mlx5_cqe *)(cq->cqe + next_cqe_offset);
+	rte_cio_wmb();
+
+	int ret = check_cqe(cqe, cq_size_get(cq), cq->ci);
+
+	if (unlikely(ret == MLX5_CQE_STATUS_ERR)) {
+		DRV_LOG(ERR, "Completion with error on qp 0x%x", 0);
+		exit(-1);
+	}
+
+	if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN))
+		return NULL;
+
+	return cqe;
+}
+
+
+/**
+ * DPDK callback for dequeue.
+ *
+ * @param dev
+ *   Pointer to the regex dev structure.
+ * @param qp_id
+ *   The queue to enqueue the traffic to.
+ * @param ops
+ *   List of regex ops to dequeue.
+ * @param nb_ops
+ *   Number of ops in ops parameter.
+ *
+ * @return
+ *   Number of packets successfully dequeued (<= pkts_n).
+ */
+uint16_t
+mlx5_regexdev_dequeue(struct rte_regexdev *dev, uint16_t qp_id,
+		      struct rte_regex_ops **ops, uint16_t nb_ops)
+{
+	struct mlx5_regex_priv *priv = dev->data->dev_private;
+	struct mlx5_regex_qp *queue = &priv->qps[qp_id];
+	struct mlx5_regex_cq *cq = &queue->cq;
+	volatile struct mlx5_cqe *cqe;
+	size_t i = 0;
+
+	while ((cqe = poll_one(cq))) {
+		uint16_t wq_counter
+			= (be16toh(cqe->wqe_counter) + 1)%MAX_WQE_INDEX;
+		size_t sqid = cqe->rsvd3[2];
+		struct mlx5_regex_sq *sq = &queue->sqs[sqid];
+		while (sq->ci != wq_counter) {
+			if (unlikely(i == nb_ops)) {
+				/* Return without updating cq->ci */
+				goto out;
+			}
+			uint32_t job_id = job_id_get(sqid,
+					sq_size_get(sq), sq->ci);
+			extract_result(ops[i], &queue->jobs[job_id]);
+			sq->ci = (sq->ci+1)%MAX_WQE_INDEX;
+			i++;
+		}
+		cq->ci = (cq->ci + 1) & 0xffffff;
+		asm volatile("" ::: "memory");
+		cq->dbr[0] = htobe32(cq->ci);
+		queue->free_sqs |= (1 << sqid);
+	}
+
+out:
+	queue->ci += i;
+	return i;
+}
 
 static MLX5DV_ALWAYS_INLINE void
 mlx5dv_set_metadata_seg(struct mlx5_wqe_metadata_seg *seg,
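
Note for reviewers (illustration only, not part of the diff): the sketch
below restates the two index wrap rules used by mlx5_regexdev_dequeue()
above. The MAX_WQE_INDEX value here is an assumption standing in for the
define introduced earlier in this series, and the doorbell helper mirrors
the cq->ci / cq->dbr handling in the patch.

    #include <stdint.h>
    #include <stddef.h>
    #include <endian.h>

    #ifndef MAX_WQE_INDEX
    #define MAX_WQE_INDEX 0x10000 /* assumption: 16-bit WQE counter wrap */
    #endif

    /* The per-SQ consumer index follows the 16-bit hardware WQE counter
     * reported in the CQE, so it wraps at MAX_WQE_INDEX. */
    static inline uint16_t
    sq_ci_next(uint16_t ci)
    {
        return (ci + 1) % MAX_WQE_INDEX;
    }

    /* The CQ consumer index written to the doorbell record is kept within
     * 24 bits and published to hardware in big-endian byte order. */
    static inline void
    cq_ci_publish(size_t *ci, volatile uint32_t *dbr)
    {
        *ci = (*ci + 1) & 0xffffff;
        dbr[0] = htobe32((uint32_t)*ci);
    }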