From patchwork Fri Jul 17 11:10:56 2020
X-Patchwork-Submitter: Ori Kam <orika@mellanox.com>
X-Patchwork-Id: 74346
X-Patchwork-Delegate: thomas@monjalon.net
From: Ori Kam <orika@mellanox.com>
To: jerinj@marvell.com, xiang.w.wang@intel.com, matan@mellanox.com,
 viacheslavo@mellanox.com
Cc: guyk@marvell.com, dev@dpdk.org, pbhagavatula@marvell.com,
 shahafs@mellanox.com, hemant.agrawal@nxp.com, opher@mellanox.com,
 alexr@mellanox.com, dovrat@marvell.com, pkapoor@marvell.com,
 nipun.gupta@nxp.com, bruce.richardson@intel.com, yang.a.hong@intel.com,
 harry.chang@intel.com, gu.jian1@zte.com.cn, shanjiangh@chinatelecom.cn,
 zhangy.yun@chinatelecom.cn, lixingfu@huachentel.com, wushuai@inspur.com,
 yuyingxia@yxlink.com, fanchenggang@sunyainfo.com, davidfgao@tencent.com,
 liuzhong1@chinaunicom.cn, zhaoyong11@huawei.com, oc@yunify.com,
 jim@netgate.com, hongjun.ni@intel.com, deri@ntop.org, fc@napatech.com,
 arthur.su@lionic.com, thomas@monjalon.net, orika@mellanox.com,
 rasland@mellanox.com
Date: Fri, 17 Jul 2020 11:10:56 +0000
Message-Id: <1594984263-89741-7-git-send-email-orika@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1594984263-89741-1-git-send-email-orika@mellanox.com>
References: <1593941027-86651-1-git-send-email-orika@mellanox.com>
 <1594984263-89741-1-git-send-email-orika@mellanox.com>
Subject: [dpdk-dev] [PATCH v4 06/13] regex/mlx5: add configure function

This commit implements the configure function, which is responsible for
configuring the RegEx engine.
Signed-off-by: Ori Kam <orika@mellanox.com>
---
 drivers/regex/mlx5/mlx5_regex.c |   2 +
 drivers/regex/mlx5/mlx5_regex.h |  15 +++
 drivers/regex/mlx5/mlx5_rxp.c   | 235 +++++++++++++++++++++++++++++++++++++++-
 3 files changed, 251 insertions(+), 1 deletion(-)

diff --git a/drivers/regex/mlx5/mlx5_regex.c b/drivers/regex/mlx5/mlx5_regex.c
index 1cb44f7..656b904 100644
--- a/drivers/regex/mlx5/mlx5_regex.c
+++ b/drivers/regex/mlx5/mlx5_regex.c
@@ -23,6 +23,7 @@
 
 const struct rte_regexdev_ops mlx5_regexdev_ops = {
 	.dev_info_get = mlx5_regex_info_get,
+	.dev_configure = mlx5_regex_configure,
 };
 
 static struct ibv_device *
@@ -143,6 +144,7 @@
 	priv->regexdev->dev_ops = &mlx5_regexdev_ops;
 	priv->regexdev->device = (struct rte_device *)pci_dev;
 	priv->regexdev->data->dev_private = priv;
+	priv->regexdev->state = RTE_REGEXDEV_READY;
 	return 0;
 error:
diff --git a/drivers/regex/mlx5/mlx5_regex.h b/drivers/regex/mlx5/mlx5_regex.h
index 082d134..f17b4f8 100644
--- a/drivers/regex/mlx5/mlx5_regex.h
+++ b/drivers/regex/mlx5/mlx5_regex.h
@@ -7,16 +7,31 @@
 
 #include
 
+struct mlx5_regex_sq {
+	uint32_t nb_desc; /* Number of desc for this object. */
+};
+
+struct mlx5_regex_qp {
+	uint32_t flags; /* QP user flags. */
+	uint32_t nb_desc; /* Total number of desc for this qp. */
+	struct mlx5_regex_sq *sqs; /* Pointer to sq array. */
+};
+
 struct mlx5_regex_priv {
 	TAILQ_ENTRY(mlx5_regex_priv) next;
 	struct ibv_context *ctx; /* Device context. */
 	struct rte_pci_device *pci_dev;
 	struct rte_regexdev *regexdev; /* Pointer to the RegEx dev. */
+	uint16_t nb_queues; /* Number of queues. */
+	struct mlx5_regex_qp *qps; /* Pointer to the qp array. */
+	uint16_t nb_max_matches; /* Max number of matches. */
 };
 
 /* mlx5_rxp.c */
 int mlx5_regex_info_get(struct rte_regexdev *dev,
 			struct rte_regexdev_info *info);
+int mlx5_regex_configure(struct rte_regexdev *dev,
+			 const struct rte_regexdev_config *cfg);
 
 /* mlx5_regex_devx.c */
 int mlx5_devx_regex_register_write(struct ibv_context *ctx, int engine_id,
diff --git a/drivers/regex/mlx5/mlx5_rxp.c b/drivers/regex/mlx5/mlx5_rxp.c
index a5a6f15..18e2338 100644
--- a/drivers/regex/mlx5/mlx5_rxp.c
+++ b/drivers/regex/mlx5/mlx5_rxp.c
@@ -2,13 +2,22 @@
  * Copyright 2020 Mellanox Technologies, Ltd
  */
 
+#include
+
 #include
 #include
+#include
 #include
 #include
 #include
+#include
+#include
+#include
+
 #include "mlx5_regex.h"
+#include "mlx5_regex_utils.h"
+#include "mlx5_rxp_csrs.h"
 
 #define MLX5_REGEX_MAX_MATCHES 255
 #define MLX5_REGEX_MAX_PAYLOAD_SIZE UINT16_MAX
@@ -17,7 +26,7 @@
 
 int
 mlx5_regex_info_get(struct rte_regexdev *dev __rte_unused,
-		     struct rte_regexdev_info *info)
+		    struct rte_regexdev_info *info)
 {
 	info->max_matches = MLX5_REGEX_MAX_MATCHES;
 	info->max_payload_size = MLX5_REGEX_MAX_PAYLOAD_SIZE;
@@ -27,3 +36,227 @@
 	info->rule_flags = 0;
 	return 0;
 }
+
+static int
+rxp_poll_csr_for_value(struct ibv_context *ctx, uint32_t *value,
+		       uint32_t address, uint32_t expected_value,
+		       uint32_t expected_mask, uint32_t timeout_ms, uint8_t id)
+{
+	unsigned int i;
+	int ret;
+
+	ret = -EBUSY;
+	for (i = 0; i < timeout_ms; i++) {
+		if (mlx5_devx_regex_register_read(ctx, id, address, value))
+			return -1;
+
+		if ((*value & expected_mask) == expected_value) {
+			ret = 0;
+			break;
+		}
+		rte_delay_us(1000);
+	}
+	return ret;
+}
+
+static int
+rxp_start_engine(struct ibv_context *ctx, uint8_t id)
+{
+	uint32_t ctrl;
+	int ret;
+
+	ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL, &ctrl);
+	if (ret)
+		return ret;
+	ctrl |= MLX5_RXP_CSR_CTRL_GO;
+	ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL, ctrl);
+	return ret;
+}
+
+static int
+rxp_stop_engine(struct ibv_context *ctx, uint8_t id)
+{
+	uint32_t ctrl;
+	int ret;
+
+	ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL, &ctrl);
+	if (ret)
+		return ret;
+	ctrl &= ~MLX5_RXP_CSR_CTRL_GO;
+	ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL, ctrl);
+	return ret;
+}
+
+static int
+rxp_init_rtru(struct ibv_context *ctx, uint8_t id, uint32_t init_bits)
+{
+	uint32_t ctrl_value;
+	uint32_t poll_value;
+	uint32_t expected_value;
+	uint32_t expected_mask;
+	int ret = 0;
+
+	/* Read the rtru ctrl CSR. */
+	ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
+					    &ctrl_value);
+	if (ret)
+		return -1;
+	/* Clear any previous init modes. */
+	ctrl_value &= ~(MLX5_RXP_RTRU_CSR_CTRL_INIT_MODE_MASK);
+	if (ctrl_value & MLX5_RXP_RTRU_CSR_CTRL_INIT) {
+		ctrl_value &= ~(MLX5_RXP_RTRU_CSR_CTRL_INIT);
+		mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
+					       ctrl_value);
+	}
+	/* Set the init_mode bits in the rtru ctrl CSR. */
+	ctrl_value |= init_bits;
+	mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
+				       ctrl_value);
+	/* Need to sleep for a short period after pulsing the rtru init bit. */
+	rte_delay_us(20000);
+	/* Poll the rtru status CSR until all the init done bits are set. */
+	DRV_LOG(DEBUG, "waiting for RXP rule memory to complete init");
+	/* Set the init bit in the rtru ctrl CSR. */
+	ctrl_value |= MLX5_RXP_RTRU_CSR_CTRL_INIT;
+	mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
+				       ctrl_value);
+	/* Clear the init bit in the rtru ctrl CSR. */
+	ctrl_value &= ~MLX5_RXP_RTRU_CSR_CTRL_INIT;
+	mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
+				       ctrl_value);
+	/* Check that the following bits are set in the RTRU_CSR. */
+	if (init_bits == MLX5_RXP_RTRU_CSR_CTRL_INIT_MODE_L1_L2) {
+		/* Must be incremental mode. */
+		expected_value = MLX5_RXP_RTRU_CSR_STATUS_L1C_INIT_DONE |
+				 MLX5_RXP_RTRU_CSR_STATUS_L2C_INIT_DONE;
+	} else {
+		expected_value = MLX5_RXP_RTRU_CSR_STATUS_IM_INIT_DONE |
+				 MLX5_RXP_RTRU_CSR_STATUS_L1C_INIT_DONE |
+				 MLX5_RXP_RTRU_CSR_STATUS_L2C_INIT_DONE;
+	}
+	expected_mask = expected_value;
+	ret = rxp_poll_csr_for_value(ctx, &poll_value,
+				     MLX5_RXP_RTRU_CSR_STATUS,
+				     expected_value, expected_mask,
+				     MLX5_RXP_CSR_STATUS_TRIAL_TIMEOUT, id);
+	if (ret)
+		return ret;
+	DRV_LOG(DEBUG, "rule memory initialise: 0x%08X", poll_value);
+	/* Clear the init bit in the rtru ctrl CSR. */
+	ctrl_value &= ~(MLX5_RXP_RTRU_CSR_CTRL_INIT);
+	mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
+				       ctrl_value);
+	return 0;
+}
+
+static int
+rxp_init(struct mlx5_regex_priv *priv, uint8_t id)
+{
+	uint32_t ctrl;
+	uint32_t reg;
+	struct ibv_context *ctx = priv->ctx;
+	int ret;
+
+	ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL, &ctrl);
+	if (ret)
+		return ret;
+	if (ctrl & MLX5_RXP_CSR_CTRL_INIT) {
+		ctrl &= ~MLX5_RXP_CSR_CTRL_INIT;
+		ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL,
+						     ctrl);
+		if (ret)
+			return ret;
+	}
+	ctrl |= MLX5_RXP_CSR_CTRL_INIT;
+	ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL, ctrl);
+	if (ret)
+		return ret;
+	ctrl &= ~MLX5_RXP_CSR_CTRL_INIT;
+	ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL, ctrl);
+	rte_delay_us(20000);
+
+	ret = rxp_poll_csr_for_value(ctx, &ctrl, MLX5_RXP_CSR_STATUS,
+				     MLX5_RXP_CSR_STATUS_INIT_DONE,
+				     MLX5_RXP_CSR_STATUS_INIT_DONE,
+				     MLX5_RXP_CSR_STATUS_TRIAL_TIMEOUT, id);
+	if (ret)
+		return ret;
+	ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL,
+					    &ctrl);
+	if (ret)
+		return ret;
+	ctrl &= ~MLX5_RXP_CSR_CTRL_INIT;
+	ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL,
+					     ctrl);
+	if (ret)
+		return ret;
+	ret = rxp_init_rtru(ctx, id, MLX5_RXP_RTRU_CSR_CTRL_INIT_MODE_IM_L1_L2);
+	if (ret)
+		return ret;
+	ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CAPABILITY_5,
+					    &reg);
+	if (ret)
+		return ret;
+	DRV_LOG(DEBUG, "max matches: %d, DDOS threshold: %d", reg >> 16,
+		reg & 0xffff);
+	ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_MAX_MATCH,
+					     priv->nb_max_matches);
+	ret |= mlx5_devx_regex_register_write(ctx, id,
+					      MLX5_RXP_CSR_MAX_LATENCY, 0);
+	ret |= mlx5_devx_regex_register_write(ctx, id,
+					      MLX5_RXP_CSR_MAX_PRI_THREAD, 0);
+	return ret;
+}
+
+int
+mlx5_regex_configure(struct rte_regexdev *dev,
+		     const struct rte_regexdev_config *cfg)
+{
+	struct mlx5_regex_priv *priv = dev->data->dev_private;
+	int ret;
+	uint8_t id;
+
+	priv->nb_queues = cfg->nb_queue_pairs;
+	priv->qps = rte_zmalloc(NULL, sizeof(struct mlx5_regex_qp) *
+				priv->nb_queues, 0);
+	if (!priv->qps) {
+		DRV_LOG(ERR, "can't allocate qps memory");
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	priv->nb_max_matches = cfg->nb_max_matches;
+	for (id = 0; id < 2; id++) {
+		ret = rxp_stop_engine(priv->ctx, id);
+		if (ret) {
+			DRV_LOG(ERR, "can't stop engine.");
+			rte_errno = ENODEV;
+			goto configure_error;
+		}
+		ret = rxp_init(priv, id);
+		if (ret) {
+			DRV_LOG(ERR, "can't init engine.");
+			rte_errno = ENODEV;
+			goto configure_error;
+		}
+		ret = mlx5_devx_regex_register_write(priv->ctx, id,
+						     MLX5_RXP_CSR_MAX_MATCH,
+						     priv->nb_max_matches);
+		if (ret) {
+			DRV_LOG(ERR, "can't update number of matches.");
+			rte_errno = ENODEV;
+			goto configure_error;
+		}
+		ret = rxp_start_engine(priv->ctx, id);
+		if (ret) {
+			DRV_LOG(ERR, "can't start engine.");
+			rte_errno = ENODEV;
+			goto configure_error;
+		}
+	}
+	return 0;
+configure_error:
+	if (priv->qps)
+		rte_free(priv->qps);
+	return -rte_errno;
+}
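
---
For reviewers who want to see the new callback in context, below is a minimal,
hypothetical usage sketch (not part of this patch) of how an application reaches
mlx5_regex_configure() through the generic rte_regexdev API. The device id,
queue count, and the app_configure_regexdev() helper name are illustrative only;
a real application would also load a rule database before enqueueing jobs.

/* Hypothetical sketch, not from this patch: configure one RegEx device. */
#include <rte_regexdev.h>

static int
app_configure_regexdev(uint8_t dev_id)
{
	struct rte_regexdev_info info;
	struct rte_regexdev_config cfg = { 0 };
	int ret;

	/* The mlx5 PMD reports max_matches = 255 via mlx5_regex_info_get(). */
	ret = rte_regexdev_info_get(dev_id, &info);
	if (ret < 0)
		return ret;
	cfg.nb_queue_pairs = 1;			/* Illustrative value. */
	cfg.nb_max_matches = info.max_matches;	/* Saved in priv->nb_max_matches. */
	/* Invokes the PMD's dev_configure callback, i.e. mlx5_regex_configure(). */
	return rte_regexdev_configure(dev_id, &cfg);
}

On the mlx5 side this call stops both RXP engines, re-initializes their rule
memory, programs MLX5_RXP_CSR_MAX_MATCH with the requested number of matches,
and starts the engines again.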