From patchwork Tue Mar 31 11:12:21 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Matan Azrad X-Patchwork-Id: 67474 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 2D3D6A0562; Tue, 31 Mar 2020 13:12:53 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id A0B5F1C032; Tue, 31 Mar 2020 13:12:38 +0200 (CEST) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id F22B11BFDC for ; Tue, 31 Mar 2020 13:12:36 +0200 (CEST) Received: from Internal Mail-Server by MTLPINE1 (envelope-from matan@mellanox.com) with ESMTPS (AES256-SHA encrypted); 31 Mar 2020 14:12:32 +0300 Received: from pegasus07.mtr.labs.mlnx (pegasus07.mtr.labs.mlnx [10.210.16.112]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 02VBCWIH019432; Tue, 31 Mar 2020 14:12:32 +0300 From: Matan Azrad To: dev@dpdk.org Cc: Viacheslav Ovsiienko , Shahaf Shuler , Maxime Coquelin Date: Tue, 31 Mar 2020 11:12:21 +0000 Message-Id: <1585653143-21987-2-git-send-email-matan@mellanox.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1585653143-21987-1-git-send-email-matan@mellanox.com> References: <1585653143-21987-1-git-send-email-matan@mellanox.com> Subject: [dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" As a preparation to listen to the virtqs status before the device is configured, manage the virtqs structures in an array instead of a list. 
Signed-off-by: Matan Azrad Acked-by: Viacheslav Ovsiienko Reviewed-by: Maxime Coquelin --- drivers/vdpa/mlx5/mlx5_vdpa.c | 43 ++++++++++++++++------------------ drivers/vdpa/mlx5/mlx5_vdpa.h | 2 +- drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 43 ++++++++++++++++------------------ drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 18 +++++++-------- drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 46 +++++++++++++++---------------------- 5 files changed, 68 insertions(+), 84 deletions(-) diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c index f10647b..b22f074 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa.c @@ -116,20 +116,18 @@ { int did = rte_vhost_get_vdpa_device_id(vid); struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did); - struct mlx5_vdpa_virtq *virtq = NULL; if (priv == NULL) { DRV_LOG(ERR, "Invalid device id: %d.", did); return -EINVAL; } - SLIST_FOREACH(virtq, &priv->virtq_list, next) - if (virtq->index == vring) - break; - if (!virtq) { + if (!priv->configured || vring >= RTE_MIN((int)priv->nr_virtqs, + (int)priv->caps.max_num_virtio_queues * 2) || + !priv->virtqs[vring].virtq) { DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring); return -EINVAL; } - return mlx5_vdpa_virtq_enable(virtq, state); + return mlx5_vdpa_virtq_enable(&priv->virtqs[vring], state); } static int @@ -482,28 +480,28 @@ rte_errno = ENODEV; return -rte_errno; } - priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv), - RTE_CACHE_LINE_SIZE); - if (!priv) { - DRV_LOG(ERR, "Failed to allocate private memory."); - rte_errno = ENOMEM; - goto error; - } ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr); if (ret) { DRV_LOG(ERR, "Unable to read HCA capabilities."); rte_errno = ENOTSUP; goto error; - } else { - if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) { - DRV_LOG(ERR, "Not enough capabilities to support vdpa," - " maybe old FW/OFED version?"); - rte_errno = ENOTSUP; - goto error; - } - priv->caps = attr.vdpa; - 
priv->log_max_rqt_size = attr.log_max_rqt_size; + } else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) { + DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe " + "old FW/OFED version?"); + rte_errno = ENOTSUP; + goto error; + } + priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) + + sizeof(struct mlx5_vdpa_virtq) * + attr.vdpa.max_num_virtio_queues * 2, + RTE_CACHE_LINE_SIZE); + if (!priv) { + DRV_LOG(ERR, "Failed to allocate private memory."); + rte_errno = ENOMEM; + goto error; } + priv->caps = attr.vdpa; + priv->log_max_rqt_size = attr.log_max_rqt_size; priv->ctx = ctx; priv->dev_addr.pci_addr = pci_dev->addr; priv->dev_addr.type = PCI_ADDR; @@ -519,7 +517,6 @@ goto error; } SLIST_INIT(&priv->mr_list); - SLIST_INIT(&priv->virtq_list); pthread_mutex_lock(&priv_list_lock); TAILQ_INSERT_TAIL(&priv_list, priv, next); pthread_mutex_unlock(&priv_list_lock); diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h index 75af410..baec106 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/mlx5_vdpa.h @@ -120,11 +120,11 @@ struct mlx5_vdpa_priv { uint16_t nr_virtqs; uint64_t features; /* Negotiated features. 
*/ uint16_t log_max_rqt_size; - SLIST_HEAD(virtq_list, mlx5_vdpa_virtq) virtq_list; struct mlx5_vdpa_steer steer; struct mlx5dv_var *var; void *virtq_db_addr; SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list; + struct mlx5_vdpa_virtq virtqs[]; }; /** diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c index 4457760..77f2eda 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c @@ -15,13 +15,12 @@ .type = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE, .dirty_bitmap_dump_enable = enable, }; - struct mlx5_vdpa_virtq *virtq; + int i; - SLIST_FOREACH(virtq, &priv->virtq_list, next) { - attr.queue_index = virtq->index; - if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) { - DRV_LOG(ERR, "Failed to modify virtq %d logging.", - virtq->index); + for (i = 0; i < priv->nr_virtqs; ++i) { + attr.queue_index = i; + if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) { + DRV_LOG(ERR, "Failed to modify virtq %d logging.", i); return -1; } } @@ -47,7 +46,7 @@ .dirty_bitmap_size = log_size, }; struct mlx5_vdpa_query_mr *mr = rte_malloc(__func__, sizeof(*mr), 0); - struct mlx5_vdpa_virtq *virtq; + int i; if (!mr) { DRV_LOG(ERR, "Failed to allocate mem for lm mr."); @@ -67,11 +66,10 @@ goto err; } attr.dirty_bitmap_mkey = mr->mkey->id; - SLIST_FOREACH(virtq, &priv->virtq_list, next) { - attr.queue_index = virtq->index; - if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) { - DRV_LOG(ERR, "Failed to modify virtq %d for lm.", - virtq->index); + for (i = 0; i < priv->nr_virtqs; ++i) { + attr.queue_index = i; + if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) { + DRV_LOG(ERR, "Failed to modify virtq %d for lm.", i); goto err; } } @@ -94,9 +92,9 @@ mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv) { struct mlx5_devx_virtq_attr attr = {0}; - struct mlx5_vdpa_virtq *virtq; uint64_t features; int ret = rte_vhost_get_negotiated_features(priv->vid, &features); + int i; if (ret) { DRV_LOG(ERR, "Failed to get 
negotiated features."); @@ -104,27 +102,26 @@ } if (!RTE_VHOST_NEED_LOG(features)) return 0; - SLIST_FOREACH(virtq, &priv->virtq_list, next) { - ret = mlx5_vdpa_virtq_modify(virtq, 0); + for (i = 0; i < priv->nr_virtqs; ++i) { + ret = mlx5_vdpa_virtq_modify(&priv->virtqs[i], 0); if (ret) return -1; - if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) { - DRV_LOG(ERR, "Failed to query virtq %d.", virtq->index); + if (mlx5_devx_cmd_query_virtq(priv->virtqs[i].virtq, &attr)) { + DRV_LOG(ERR, "Failed to query virtq %d.", i); return -1; } DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, " - "hw_used_index=%d", priv->vid, virtq->index, + "hw_used_index=%d", priv->vid, i, attr.hw_available_index, attr.hw_used_index); - ret = rte_vhost_set_vring_base(priv->vid, virtq->index, + ret = rte_vhost_set_vring_base(priv->vid, i, attr.hw_available_index, attr.hw_used_index); if (ret) { - DRV_LOG(ERR, "Failed to set virtq %d base.", - virtq->index); + DRV_LOG(ERR, "Failed to set virtq %d base.", i); return -1; } - rte_vhost_log_used_vring(priv->vid, virtq->index, 0, - MLX5_VDPA_USED_RING_LEN(virtq->vq_size)); + rte_vhost_log_used_vring(priv->vid, i, 0, + MLX5_VDPA_USED_RING_LEN(priv->virtqs[i].vq_size)); } return 0; } diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c index 9c11452..96ffc21 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c @@ -76,13 +76,13 @@ static int mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv) { - struct mlx5_vdpa_virtq *virtq; + int i; uint32_t rqt_n = RTE_MIN(MLX5_VDPA_DEFAULT_RQT_SIZE, 1 << priv->log_max_rqt_size); struct mlx5_devx_rqt_attr *attr = rte_zmalloc(__func__, sizeof(*attr) + rqt_n * sizeof(uint32_t), 0); - uint32_t i = 0, j; + uint32_t k = 0, j; int ret = 0; if (!attr) { @@ -90,15 +90,15 @@ rte_errno = ENOMEM; return -ENOMEM; } - SLIST_FOREACH(virtq, &priv->virtq_list, next) { - if (is_virtq_recvq(virtq->index, priv->nr_virtqs) && - virtq->enable) { - 
attr->rq_list[i] = virtq->virtq->id; - i++; + for (i = 0; i < priv->nr_virtqs; i++) { + if (is_virtq_recvq(i, priv->nr_virtqs) && + priv->virtqs[i].enable) { + attr->rq_list[k] = priv->virtqs[i].virtq->id; + k++; } } - for (j = 0; i != rqt_n; ++i, ++j) - attr->rq_list[i] = attr->rq_list[j]; + for (j = 0; k != rqt_n; ++k, ++j) + attr->rq_list[k] = attr->rq_list[j]; attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ; attr->rqt_max_size = rqt_n; attr->rqt_actual_size = rqt_n; diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c index 8bebb92..3575272 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c @@ -59,12 +59,9 @@ usleep(MLX5_VDPA_INTR_RETRIES_USEC); } } - virtq->intr_handle.fd = -1; } - if (virtq->virtq) { + if (virtq->virtq) claim_zero(mlx5_devx_cmd_destroy(virtq->virtq)); - virtq->virtq = NULL; - } for (i = 0; i < RTE_DIM(virtq->umems); ++i) { if (virtq->umems[i].obj) claim_zero(mlx5_glue->devx_umem_dereg @@ -72,27 +69,20 @@ if (virtq->umems[i].buf) rte_free(virtq->umems[i].buf); } - memset(&virtq->umems, 0, sizeof(virtq->umems)); if (virtq->eqp.fw_qp) mlx5_vdpa_event_qp_destroy(&virtq->eqp); + memset(virtq, 0, sizeof(*virtq)); + virtq->intr_handle.fd = -1; return 0; } void mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv) { - struct mlx5_vdpa_virtq *entry; - struct mlx5_vdpa_virtq *next; + int i; - entry = SLIST_FIRST(&priv->virtq_list); - while (entry) { - next = SLIST_NEXT(entry, next); - mlx5_vdpa_virtq_unset(entry); - SLIST_REMOVE(&priv->virtq_list, entry, mlx5_vdpa_virtq, next); - rte_free(entry); - entry = next; - } - SLIST_INIT(&priv->virtq_list); + for (i = 0; i < priv->nr_virtqs; i++) + mlx5_vdpa_virtq_unset(&priv->virtqs[i]); if (priv->tis) { claim_zero(mlx5_devx_cmd_destroy(priv->tis)); priv->tis = NULL; @@ -106,6 +96,7 @@ priv->virtq_db_addr = NULL; } priv->features = 0; + priv->nr_virtqs = 0; } int @@ -140,9 +131,9 @@ } static int -mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv 
*priv, - struct mlx5_vdpa_virtq *virtq, int index) +mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index) { + struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index]; struct rte_vhost_vring vq; struct mlx5_devx_virtq_attr attr = {0}; uint64_t gpa; @@ -340,7 +331,6 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv) { struct mlx5_devx_tis_attr tis_attr = {0}; - struct mlx5_vdpa_virtq *virtq; uint32_t i; uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid); int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features); @@ -349,6 +339,12 @@ DRV_LOG(ERR, "Failed to configure negotiated features."); return -1; } + if (nr_vring > priv->caps.max_num_virtio_queues * 2) { + DRV_LOG(ERR, "Do not support more than %d virtqs(%d).", + (int)priv->caps.max_num_virtio_queues * 2, + (int)nr_vring); + return -E2BIG; + } /* Always map the entire page. */ priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ | PROT_WRITE, MAP_SHARED, priv->ctx->cmd_fd, @@ -372,16 +368,10 @@ DRV_LOG(ERR, "Failed to create TIS."); goto error; } - for (i = 0; i < nr_vring; i++) { - virtq = rte_zmalloc(__func__, sizeof(*virtq), 0); - if (!virtq || mlx5_vdpa_virtq_setup(priv, virtq, i)) { - if (virtq) - rte_free(virtq); - goto error; - } - SLIST_INSERT_HEAD(&priv->virtq_list, virtq, next); - } priv->nr_virtqs = nr_vring; + for (i = 0; i < nr_vring; i++) + if (mlx5_vdpa_virtq_setup(priv, i)) + goto error; return 0; error: mlx5_vdpa_virtqs_release(priv); From patchwork Tue Mar 31 11:12:22 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Matan Azrad X-Patchwork-Id: 67473 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 1B2D8A0562; Tue, 31 Mar 2020 13:12:43 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by 
dpdk.org (Postfix) with ESMTP id E1FA91BFD9; Tue, 31 Mar 2020 13:12:36 +0200 (CEST) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id B5A982BCE for ; Tue, 31 Mar 2020 13:12:34 +0200 (CEST) Received: from Internal Mail-Server by MTLPINE2 (envelope-from matan@mellanox.com) with ESMTPS (AES256-SHA encrypted); 31 Mar 2020 14:12:32 +0300 Received: from pegasus07.mtr.labs.mlnx (pegasus07.mtr.labs.mlnx [10.210.16.112]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 02VBCWII019432; Tue, 31 Mar 2020 14:12:32 +0300 From: Matan Azrad To: dev@dpdk.org Cc: Viacheslav Ovsiienko , Shahaf Shuler , Maxime Coquelin Date: Tue, 31 Mar 2020 11:12:22 +0000 Message-Id: <1585653143-21987-3-git-send-email-matan@mellanox.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1585653143-21987-1-git-send-email-matan@mellanox.com> References: <1585653143-21987-1-git-send-email-matan@mellanox.com> Subject: [dpdk-dev] [PATCH 2/3] vdpa/mlx5: separate virtq stop X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" In live migration, before logging the virtq, the driver queries the virtq indexes after moving it to suspend mode. Separate this method into a new function, mlx5_vdpa_virtq_stop, as a preparation for reuse. 
Signed-off-by: Matan Azrad Acked-by: Viacheslav Ovsiienko Reviewed-by: Maxime Coquelin --- drivers/vdpa/mlx5/mlx5_vdpa.h | 13 +++++++++++++ drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 17 ++--------------- drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 26 ++++++++++++++++++++++++++ 3 files changed, 41 insertions(+), 15 deletions(-) diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h index baec106..0edd688 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/mlx5_vdpa.h @@ -308,4 +308,17 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base, */ int mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state); +/** + * Stop virtq before destroying it. + * + * @param[in] priv + * The vdpa driver private structure. + * @param[in] index + * The virtq index. + * + * @return + * 0 on success, a negative value otherwise. + */ +int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index); + #endif /* RTE_PMD_MLX5_VDPA_H_ */ diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c index 77f2eda..26b7ce1 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c @@ -91,7 +91,6 @@ int mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv) { - struct mlx5_devx_virtq_attr attr = {0}; uint64_t features; int ret = rte_vhost_get_negotiated_features(priv->vid, &features); int i; @@ -103,21 +102,9 @@ if (!RTE_VHOST_NEED_LOG(features)) return 0; for (i = 0; i < priv->nr_virtqs; ++i) { - ret = mlx5_vdpa_virtq_modify(&priv->virtqs[i], 0); - if (ret) - return -1; - if (mlx5_devx_cmd_query_virtq(priv->virtqs[i].virtq, &attr)) { - DRV_LOG(ERR, "Failed to query virtq %d.", i); - return -1; - } - DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, " - "hw_used_index=%d", priv->vid, i, - attr.hw_available_index, attr.hw_used_index); - ret = rte_vhost_set_vring_base(priv->vid, i, - attr.hw_available_index, - attr.hw_used_index); + ret = mlx5_vdpa_virtq_stop(priv, i); if (ret) { - DRV_LOG(ERR, 
"Failed to set virtq %d base.", i); + DRV_LOG(ERR, "Failed to stop virtq %d.", i); return -1; } rte_vhost_log_used_vring(priv->vid, i, 0, diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c index 3575272..0bb6416 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c @@ -112,6 +112,32 @@ return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr); } +int +mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index) +{ + struct mlx5_devx_virtq_attr attr = {0}; + struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index]; + int ret = mlx5_vdpa_virtq_modify(virtq, 0); + + if (ret) + return -1; + if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) { + DRV_LOG(ERR, "Failed to query virtq %d.", index); + return -1; + } + DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, " + "hw_used_index=%d", priv->vid, index, + attr.hw_available_index, attr.hw_used_index); + ret = rte_vhost_set_vring_base(priv->vid, index, + attr.hw_available_index, + attr.hw_used_index); + if (ret) { + DRV_LOG(ERR, "Failed to set virtq %d base.", index); + return -1; + } + return 0; +} + static uint64_t mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva) { From patchwork Tue Mar 31 11:12:23 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Matan Azrad X-Patchwork-Id: 67475 X-Patchwork-Delegate: maxime.coquelin@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 76BFFA0562; Tue, 31 Mar 2020 13:13:00 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id F10DB1C065; Tue, 31 Mar 2020 13:12:39 +0200 (CEST) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 177D51C02A for ; Tue, 31 Mar 2020 13:12:36 +0200 
(CEST) Received: from Internal Mail-Server by MTLPINE1 (envelope-from matan@mellanox.com) with ESMTPS (AES256-SHA encrypted); 31 Mar 2020 14:12:32 +0300 Received: from pegasus07.mtr.labs.mlnx (pegasus07.mtr.labs.mlnx [10.210.16.112]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 02VBCWIJ019432; Tue, 31 Mar 2020 14:12:32 +0300 From: Matan Azrad To: dev@dpdk.org Cc: Viacheslav Ovsiienko , Shahaf Shuler , Maxime Coquelin Date: Tue, 31 Mar 2020 11:12:23 +0000 Message-Id: <1585653143-21987-4-git-send-email-matan@mellanox.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1585653143-21987-1-git-send-email-matan@mellanox.com> References: <1585653143-21987-1-git-send-email-matan@mellanox.com> Subject: [dpdk-dev] [PATCH 3/3] vdpa/mlx5: recreate a virtq becoming enabled X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" The virtq configurations may be changed when it moves from disabled state to enabled state. Listen to the state callback even if the device is not configured. Recreate the virtq when it moves from disabled state to enabled state and when the device is configured. 
Signed-off-by: Matan Azrad Acked-by: Viacheslav Ovsiienko Reviewed-by: Maxime Coquelin --- drivers/vdpa/mlx5/mlx5_vdpa.c | 12 +++-- drivers/vdpa/mlx5/mlx5_vdpa.h | 39 +++++++++++++++-- drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 17 +++++--- drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 87 ++++++++++++++++++------------------- drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 59 ++++++++++++++++++++++--- 5 files changed, 146 insertions(+), 68 deletions(-) diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c index b22f074..fe17ced 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa.c @@ -121,13 +121,11 @@ DRV_LOG(ERR, "Invalid device id: %d.", did); return -EINVAL; } - if (!priv->configured || vring >= RTE_MIN((int)priv->nr_virtqs, - (int)priv->caps.max_num_virtio_queues * 2) || - !priv->virtqs[vring].virtq) { - DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring); - return -EINVAL; + if (vring >= (int)priv->caps.max_num_virtio_queues * 2) { + DRV_LOG(ERR, "Too big vring id: %d.", vring); + return -E2BIG; } - return mlx5_vdpa_virtq_enable(&priv->virtqs[vring], state); + return mlx5_vdpa_virtq_enable(priv, vring, state); } static int @@ -206,7 +204,7 @@ if (priv->configured) ret |= mlx5_vdpa_lm_log(priv); mlx5_vdpa_cqe_event_unset(priv); - ret |= mlx5_vdpa_steer_unset(priv); + mlx5_vdpa_steer_unset(priv); mlx5_vdpa_virtqs_release(priv); mlx5_vdpa_event_qp_global_release(priv); mlx5_vdpa_mem_dereg(priv); diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h index 0edd688..fcc216a 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/mlx5_vdpa.h @@ -127,6 +127,24 @@ struct mlx5_vdpa_priv { struct mlx5_vdpa_virtq virtqs[]; }; +/* + * Check whether virtq is for traffic receive. + * According to VIRTIO_NET Spec the virtqueues index identity its type by: + * 0 receiveq1 + * 1 transmitq1 + * ... 
+ * 2(N-1) receiveqN + * 2(N-1)+1 transmitqN + * 2N controlq + */ +static inline uint8_t +is_virtq_recvq(int virtq_index, int nr_vring) +{ + if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1) + return 1; + return 0; +} + /** * Release all the prepared memory regions and all their related resources. * @@ -223,15 +241,17 @@ int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n, /** * Enable\Disable virtq.. * - * @param[in] virtq - * The vdpa driver private virtq structure. + * @param[in] priv + * The vdpa driver private structure. + * @param[in] index + * The virtq index. * @param[in] enable * Set to enable, otherwise disable. * * @return * 0 on success, a negative value otherwise. */ -int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable); +int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable); /** * Unset steering and release all its related resources- stop traffic. @@ -239,7 +259,18 @@ int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n, * @param[in] priv * The vdpa driver private structure. */ -int mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv); +void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv); + +/** + * Update steering according to the received queues status. + * + * @param[in] priv + * The vdpa driver private structure. + * + * @return + * 0 on success, a negative value otherwise. 
+ */ +int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv); /** * Setup steering and all its related resources to enable RSS traffic from the diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c index 26b7ce1..460e01d 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c @@ -19,7 +19,8 @@ for (i = 0; i < priv->nr_virtqs; ++i) { attr.queue_index = i; - if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) { + if (!priv->virtqs[i].virtq || + mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) { DRV_LOG(ERR, "Failed to modify virtq %d logging.", i); return -1; } @@ -68,7 +69,8 @@ attr.dirty_bitmap_mkey = mr->mkey->id; for (i = 0; i < priv->nr_virtqs; ++i) { attr.queue_index = i; - if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) { + if (!priv->virtqs[i].virtq || + mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) { DRV_LOG(ERR, "Failed to modify virtq %d for lm.", i); goto err; } @@ -102,9 +104,14 @@ if (!RTE_VHOST_NEED_LOG(features)) return 0; for (i = 0; i < priv->nr_virtqs; ++i) { - ret = mlx5_vdpa_virtq_stop(priv, i); - if (ret) { - DRV_LOG(ERR, "Failed to stop virtq %d.", i); + if (priv->virtqs[i].virtq) { + ret = mlx5_vdpa_virtq_stop(priv, i); + if (ret) { + DRV_LOG(ERR, "Failed to stop virtq %d.", i); + return -1; + } + } else { + DRV_LOG(ERR, "virtq %d is not created.", i); return -1; } rte_vhost_log_used_vring(priv->vid, i, 0, diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c index 96ffc21..406c7be 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c @@ -12,10 +12,9 @@ #include "mlx5_vdpa_utils.h" #include "mlx5_vdpa.h" -int -mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv) +static void +mlx5_vdpa_rss_flows_destroy(struct mlx5_vdpa_priv *priv) { - int ret __rte_unused; unsigned i; for (i = 0; i < RTE_DIM(priv->steer.rss); ++i) { @@ -40,6 +39,12 @@ priv->steer.rss[i].matcher = NULL; } } +} 
+ +void +mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv) +{ + mlx5_vdpa_rss_flows_destroy(priv); if (priv->steer.tbl) { claim_zero(mlx5_glue->dr_destroy_flow_tbl(priv->steer.tbl)); priv->steer.tbl = NULL; @@ -52,27 +57,13 @@ claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt)); priv->steer.rqt = NULL; } - return 0; } +#define MLX5_VDPA_DEFAULT_RQT_SIZE 512 /* - * According to VIRTIO_NET Spec the virtqueues index identity its type by: - * 0 receiveq1 - * 1 transmitq1 - * ... - * 2(N-1) receiveqN - * 2(N-1)+1 transmitqN - * 2N controlq + * Return the number of queues configured to the table on success, otherwise + * -1 on error. */ -static uint8_t -is_virtq_recvq(int virtq_index, int nr_vring) -{ - if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1) - return 1; - return 0; -} - -#define MLX5_VDPA_DEFAULT_RQT_SIZE 512 static int mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv) { @@ -83,7 +74,7 @@ + rqt_n * sizeof(uint32_t), 0); uint32_t k = 0, j; - int ret = 0; + int ret = 0, num; if (!attr) { DRV_LOG(ERR, "Failed to allocate RQT attributes memory."); @@ -92,11 +83,15 @@ } for (i = 0; i < priv->nr_virtqs; i++) { if (is_virtq_recvq(i, priv->nr_virtqs) && - priv->virtqs[i].enable) { + priv->virtqs[i].enable && priv->virtqs[i].virtq) { attr->rq_list[k] = priv->virtqs[i].virtq->id; k++; } } + if (k == 0) + /* No enabled RQ to configure for RSS. */ + return 0; + num = (int)k; for (j = 0; k != rqt_n; ++k, ++j) attr->rq_list[k] = attr->rq_list[j]; attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ; @@ -114,26 +109,7 @@ DRV_LOG(ERR, "Failed to modify RQT."); } rte_free(attr); - return ret; -} - -int -mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable) -{ - struct mlx5_vdpa_priv *priv = virtq->priv; - int ret = 0; - - DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", virtq->index, - virtq->enable ? "en" : "dis", enable ? 
"en" : "dis"); - if (virtq->enable == !!enable) - return 0; - virtq->enable = !!enable; - if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) { - ret = mlx5_vdpa_rqt_prepare(priv); - if (ret) - virtq->enable = !enable; - } - return ret; + return ret ? -1 : num; } static int __rte_unused @@ -262,11 +238,32 @@ } int +mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv) +{ + int ret = mlx5_vdpa_rqt_prepare(priv); + + if (ret == 0) { + mlx5_vdpa_rss_flows_destroy(priv); + if (priv->steer.rqt) { + claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt)); + priv->steer.rqt = NULL; + } + } else if (ret < 0) { + return ret; + } else if (!priv->steer.rss[0].flow) { + ret = mlx5_vdpa_rss_flows_create(priv); + if (ret) { + DRV_LOG(ERR, "Cannot create RSS flows."); + return -1; + } + } + return 0; +} + +int mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv) { #ifdef HAVE_MLX5DV_DR - if (mlx5_vdpa_rqt_prepare(priv)) - return -1; priv->steer.domain = mlx5_glue->dr_create_domain(priv->ctx, MLX5DV_DR_DOMAIN_TYPE_NIC_RX); if (!priv->steer.domain) { @@ -278,7 +275,7 @@ DRV_LOG(ERR, "Failed to create table 0 with Rx domain."); goto error; } - if (mlx5_vdpa_rss_flows_create(priv)) + if (mlx5_vdpa_steer_update(priv)) goto error; return 0; error: diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c index 0bb6416..defb9e1 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c @@ -59,9 +59,11 @@ usleep(MLX5_VDPA_INTR_RETRIES_USEC); } } + virtq->intr_handle.fd = -1; } if (virtq->virtq) claim_zero(mlx5_devx_cmd_destroy(virtq->virtq)); + virtq->virtq = NULL; for (i = 0; i < RTE_DIM(virtq->umems); ++i) { if (virtq->umems[i].obj) claim_zero(mlx5_glue->devx_umem_dereg @@ -69,10 +71,9 @@ if (virtq->umems[i].buf) rte_free(virtq->umems[i].buf); } + memset(&virtq->umems, 0, sizeof(virtq->umems)); if (virtq->eqp.fw_qp) mlx5_vdpa_event_qp_destroy(&virtq->eqp); - memset(virtq, 0, sizeof(*virtq)); - virtq->intr_handle.fd = -1; return 0; } @@ 
-81,8 +82,10 @@ { int i; - for (i = 0; i < priv->nr_virtqs; i++) + for (i = 0; i < priv->nr_virtqs; i++) { mlx5_vdpa_virtq_unset(&priv->virtqs[i]); + priv->virtqs[i].enable = 0; + } if (priv->tis) { claim_zero(mlx5_devx_cmd_destroy(priv->tis)); priv->tis = NULL; @@ -265,10 +268,7 @@ goto error; if (mlx5_vdpa_virtq_modify(virtq, 1)) goto error; - virtq->enable = 1; virtq->priv = priv; - /* Be sure notifications are not missed during configuration. */ - claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1)); rte_write32(virtq->index, priv->virtq_db_addr); /* Setup doorbell mapping. */ virtq->intr_handle.fd = vq.kickfd; @@ -395,11 +395,56 @@ goto error; } priv->nr_virtqs = nr_vring; - for (i = 0; i < nr_vring; i++) + for (i = 0; i < nr_vring; i++) { + claim_zero(rte_vhost_enable_guest_notification(priv->vid, i, + 1)); if (mlx5_vdpa_virtq_setup(priv, i)) goto error; + } return 0; error: mlx5_vdpa_virtqs_release(priv); return -1; } + +int +mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable) +{ + struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index]; + int ret; + + DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index, + virtq->enable ? "en" : "dis", enable ? "en" : "dis"); + if (virtq->enable == !!enable) + return 0; + if (!priv->configured) { + virtq->enable = !!enable; + return 0; + } + if (enable) { + /* Configuration might have been updated - reconfigure virtq. */ + if (virtq->virtq) { + ret = mlx5_vdpa_virtq_stop(priv, index); + if (ret) + DRV_LOG(WARNING, "Failed to stop virtq %d.", + index); + mlx5_vdpa_virtq_unset(virtq); + } + ret = mlx5_vdpa_virtq_setup(priv, index); + if (ret) { + DRV_LOG(ERR, "Failed to setup virtq %d.", index); + return ret; + /* The only case virtq can stay invalid. */ + } + } + virtq->enable = !!enable; + if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) { + /* Need to add received virtq to the RQT table of the TIRs. 
*/ + ret = mlx5_vdpa_steer_update(priv); + if (ret) { + virtq->enable = !enable; + return ret; + } + } + return 0; +}