From patchwork Wed Jan 29 10:09:05 2020
X-Patchwork-Submitter: Matan Azrad
X-Patchwork-Id: 65297
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Matan Azrad
To: dev@dpdk.org, Viacheslav Ovsiienko
Cc: Maxime Coquelin
Date: Wed, 29 Jan 2020 10:09:05 +0000
Message-Id: <1580292549-27439-10-git-send-email-matan@mellanox.com>
In-Reply-To: <1580292549-27439-1-git-send-email-matan@mellanox.com>
References: <1579539790-3882-1-git-send-email-matan@mellanox.com>
 <1580292549-27439-1-git-send-email-matan@mellanox.com>
Subject: [dpdk-dev] [PATCH v2 09/13] vdpa/mlx5: support queue state operation

Add support for the set_vring_state operation. Using the DevX API, the
virtq state can be changed as described in the PRM:
enable - move the virtq to the ready state.
disable - move the virtq to the suspend state.
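For reference, a minimal sketch of the DevX command this state change
boils down to; it mirrors the mlx5_vdpa_virtq_modify() helper added by
this patch, and the wrapper name "virtq_set_state" is illustrative, not
a driver symbol:

	/* Sketch only: the PRM virtq state change via DevX. The attr type
	 * and the modify command are the driver's DevX layer; error
	 * handling is elided. enable -> RDY, disable -> SUSPEND. */
	static int
	virtq_set_state(struct mlx5_devx_obj *virtq_obj, uint16_t index,
			int enable)
	{
		struct mlx5_devx_virtq_attr attr = {
			.type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
			.state = enable ? MLX5_VIRTQ_STATE_RDY :
					  MLX5_VIRTQ_STATE_SUSPEND,
			.queue_index = index,
		};

		return mlx5_devx_cmd_modify_virtq(virtq_obj, &attr);
	}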
Signed-off-by: Matan Azrad
Acked-by: Viacheslav Ovsiienko
Acked-by: Maxime Coquelin
---
 drivers/vdpa/mlx5/mlx5_vdpa.c       | 23 ++++++++++++++++++++++-
 drivers/vdpa/mlx5/mlx5_vdpa.h       | 15 +++++++++++++++
 drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 22 ++++++++++++++++++++--
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 25 +++++++++++++++++++++----
 4 files changed, 78 insertions(+), 7 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 12cfee2..71189c4 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -106,13 +106,34 @@
 	return 0;
 }
 
+static int
+mlx5_vdpa_set_vring_state(int vid, int vring, int state)
+{
+	int did = rte_vhost_get_vdpa_device_id(vid);
+	struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+	struct mlx5_vdpa_virtq *virtq = NULL;
+
+	if (priv == NULL) {
+		DRV_LOG(ERR, "Invalid device id: %d.", did);
+		return -EINVAL;
+	}
+	SLIST_FOREACH(virtq, &priv->virtq_list, next)
+		if (virtq->index == vring)
+			break;
+	if (!virtq) {
+		DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
+		return -EINVAL;
+	}
+	return mlx5_vdpa_virtq_enable(virtq, state);
+}
+
 static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
 	.get_queue_num = mlx5_vdpa_get_queue_num,
 	.get_features = mlx5_vdpa_get_vdpa_features,
 	.get_protocol_features = mlx5_vdpa_get_protocol_features,
 	.dev_conf = NULL,
 	.dev_close = NULL,
-	.set_vring_state = NULL,
+	.set_vring_state = mlx5_vdpa_set_vring_state,
 	.set_features = NULL,
 	.migration_done = NULL,
 	.get_vfio_group_fd = NULL,
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 2b0b285..383a33e 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -64,8 +64,10 @@ struct mlx5_vdpa_query_mr {
 
 struct mlx5_vdpa_virtq {
 	SLIST_ENTRY(mlx5_vdpa_virtq) next;
+	uint8_t enable;
 	uint16_t index;
 	uint16_t vq_size;
+	struct mlx5_vdpa_priv *priv;
 	struct mlx5_devx_obj *virtq;
 	struct mlx5_vdpa_event_qp eqp;
 	struct {
@@ -207,6 +209,19 @@ int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
 int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);
 
 /**
+ * Enable/disable virtq.
+ *
+ * @param[in] virtq
+ *   The vdpa driver private virtq structure.
+ * @param[in] enable
+ *   Set to nonzero to enable, zero to disable.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable);
+
+/**
 * Unset steering and release all its related resources - stop traffic.
 *
 * @param[in] priv
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
index f365c10..36017f1 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
@@ -73,7 +73,7 @@
 }
 
 #define MLX5_VDPA_DEFAULT_RQT_SIZE 512
-static int __rte_unused
+static int
 mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
 {
 	struct mlx5_vdpa_virtq *virtq;
@@ -91,7 +91,8 @@
 		return -ENOMEM;
 	}
 	SLIST_FOREACH(virtq, &priv->virtq_list, next) {
-		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
+		if (is_virtq_recvq(virtq->index, priv->nr_virtqs) &&
+		    virtq->enable) {
 			attr->rq_list[i] = virtq->virtq->id;
 			i++;
 		}
@@ -116,6 +117,23 @@
 	return ret;
 }
 
+int
+mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable)
+{
+	struct mlx5_vdpa_priv *priv = virtq->priv;
+	int ret = 0;
+
+	if (virtq->enable == !!enable)
+		return 0;
+	virtq->enable = !!enable;
+	if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
+		ret = mlx5_vdpa_rqt_prepare(priv);
+		if (ret)
+			virtq->enable = !enable;
+	}
+	return ret;
+}
+
 static int __rte_unused
 mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
 {
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index e27af28..60aa040 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -15,13 +15,13 @@
 static int
 mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
 {
-	int i;
+	unsigned int i;
 
 	if (virtq->virtq) {
 		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
 		virtq->virtq = NULL;
 	}
-	for (i = 0; i < 3; ++i) {
+	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
 		if (virtq->umems[i].obj)
 			claim_zero(mlx5_glue->devx_umem_dereg
 						   (virtq->umems[i].obj));
@@ -60,6 +60,19 @@
 	priv->features = 0;
 }
 
+static int
+mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
+{
+	struct mlx5_devx_virtq_attr attr = {
+		.type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
+		.state = state ? MLX5_VIRTQ_STATE_RDY :
+				 MLX5_VIRTQ_STATE_SUSPEND,
+		.queue_index = virtq->index,
+	};
+
+	return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
+}
+
 static uint64_t
 mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
 {
@@ -86,7 +99,7 @@
 	struct mlx5_devx_virtq_attr attr = {0};
 	uint64_t gpa;
 	int ret;
-	int i;
+	unsigned int i;
 	uint16_t last_avail_idx;
 	uint16_t last_used_idx;
 
@@ -125,7 +138,7 @@
 			" need event QPs and event mechanism.", index);
 	}
 	/* Setup 3 UMEMs for each virtq. */
-	for (i = 0; i < 3; ++i) {
+	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
 		virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
 							priv->caps.umems[i].b;
 		virtq->umems[i].buf = rte_zmalloc(__func__,
@@ -182,8 +195,12 @@
 	attr.tis_id = priv->tis->id;
 	attr.queue_index = index;
 	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->ctx, &attr);
+	virtq->priv = priv;
 	if (!virtq->virtq)
 		goto error;
+	if (mlx5_vdpa_virtq_modify(virtq, 1))
+		goto error;
+	virtq->enable = 1;
 	return 0;
 error:
 	mlx5_vdpa_virtq_unset(virtq);
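
For reviewers, a self-contained illustration (not driver code) of the
selection rule the steering change implements: after this patch only RX
virtqs whose enable flag is set are listed in the RQT, so RSS traffic is
spread only over enabled receive queues. The stub types and the
even-index-is-RX test are assumptions for the example, not the driver's
is_virtq_recvq() definition:

	/* Illustration of the virtq->enable check added to
	 * mlx5_vdpa_rqt_prepare(); toy_* types stand in for the
	 * driver's structures. */
	#include <stdint.h>
	#include <stdio.h>

	struct toy_virtq {
		uint16_t index;
		uint8_t enable;
		uint32_t devx_id;	/* stands in for virtq->virtq->id */
	};

	static int
	toy_is_recvq(uint16_t index)
	{
		return (index % 2) == 0;	/* assumed RX test */
	}

	int
	main(void)
	{
		struct toy_virtq vqs[] = {
			{ 0, 1, 0x10 }, { 1, 1, 0x11 },
			{ 2, 0, 0x12 }, { 3, 1, 0x13 },
		};
		uint32_t rq_list[4];
		unsigned int i, n = 0;

		/* Disabled RX virtq 2 is skipped when the RQT is rebuilt. */
		for (i = 0; i < 4; ++i)
			if (toy_is_recvq(vqs[i].index) && vqs[i].enable)
				rq_list[n++] = vqs[i].devx_id;
		for (i = 0; i < n; ++i)
			printf("RQT[%u] = 0x%x\n", i, rq_list[i]);
		return 0;
	}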