[v2,11/13] vdpa/mlx5: support live migration
Commit Message
Add support for the live migration feature, performed by the HW:
Create a single Mkey that maps the memory address space of the
VHOST live migration log file.
Modify VIRTIO_NET_Q object and provide vhost_log_page,
dirty_bitmap_mkey, dirty_bitmap_size, dirty_bitmap_addr
and dirty_bitmap_dump_enable.
Modify VIRTIO_NET_Q object and move state to SUSPEND.
Query VIRTIO_NET_Q and get hw_available_idx and hw_used_idx.
Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
doc/guides/vdpadevs/features/mlx5.ini | 1 +
drivers/vdpa/mlx5/Makefile | 1 +
drivers/vdpa/mlx5/meson.build | 1 +
drivers/vdpa/mlx5/mlx5_vdpa.c | 44 +++++++++++-
drivers/vdpa/mlx5/mlx5_vdpa.h | 55 ++++++++++++++
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 130 ++++++++++++++++++++++++++++++++++
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 7 +-
7 files changed, 236 insertions(+), 3 deletions(-)
create mode 100644 drivers/vdpa/mlx5/mlx5_vdpa_lm.c
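In short, when the vhost library negotiates VHOST_F_LOG_ALL, the driver maps the vhost dirty log with a single Mkey and enables dirty-bitmap dumping on every virtq. A minimal sketch of that flow, using the functions this patch adds (simplified; the wrapper name and the trimmed error handling are illustrative only, not part of the patch):

	/* Illustrative sketch of the set_features path added below. */
	static int
	lm_features_set_sketch(struct mlx5_vdpa_priv *priv, int vid)
	{
		uint64_t features, log_base, log_size;

		if (rte_vhost_get_negotiated_features(vid, &features))
			return -1;
		if (!RTE_VHOST_NEED_LOG(features))
			return 0; /* No migration in progress, nothing to do. */
		/* Map the whole vhost dirty log with one Mkey... */
		if (rte_vhost_get_log_base(vid, &log_base, &log_size) ||
		    mlx5_vdpa_dirty_bitmap_set(priv, log_base, log_size))
			return -1;
		/* ...and ask the HW to start dumping dirty bits per virtq. */
		return mlx5_vdpa_logging_enable(priv, 1);
	}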
Comments
On 1/29/20 11:09 AM, Matan Azrad wrote:
> Add support for live migration feature by the HW:
> Create a single Mkey that maps the memory address space of the
> VHOST live migration log file.
> Modify VIRTIO_NET_Q object and provide vhost_log_page,
> dirty_bitmap_mkey, dirty_bitmap_size, dirty_bitmap_addr
> and dirty_bitmap_dump_enable.
> Modify VIRTIO_NET_Q object and move state to SUSPEND.
> Query VIRTIO_NET_Q and get hw_available_idx and hw_used_idx.
>
> Signed-off-by: Matan Azrad <matan@mellanox.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
> ---
> doc/guides/vdpadevs/features/mlx5.ini | 1 +
> drivers/vdpa/mlx5/Makefile | 1 +
> drivers/vdpa/mlx5/meson.build | 1 +
> drivers/vdpa/mlx5/mlx5_vdpa.c | 44 +++++++++++-
> drivers/vdpa/mlx5/mlx5_vdpa.h | 55 ++++++++++++++
> drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 130 ++++++++++++++++++++++++++++++++++
> drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 7 +-
> 7 files changed, 236 insertions(+), 3 deletions(-)
> create mode 100644 drivers/vdpa/mlx5/mlx5_vdpa_lm.c
>
> diff --git a/doc/guides/vdpadevs/features/mlx5.ini b/doc/guides/vdpadevs/features/mlx5.ini
> index e4ee34b..1da9c1b 100644
> --- a/doc/guides/vdpadevs/features/mlx5.ini
> +++ b/doc/guides/vdpadevs/features/mlx5.ini
> @@ -9,6 +9,7 @@ guest csum = Y
> host tso4 = Y
> host tso6 = Y
> version 1 = Y
> +log all = Y
> any layout = Y
> guest announce = Y
> mq = Y
> diff --git a/drivers/vdpa/mlx5/Makefile b/drivers/vdpa/mlx5/Makefile
> index 2f70a98..4d1f528 100644
> --- a/drivers/vdpa/mlx5/Makefile
> +++ b/drivers/vdpa/mlx5/Makefile
> @@ -12,6 +12,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_VDPA_PMD) += mlx5_vdpa_mem.c
> SRCS-$(CONFIG_RTE_LIBRTE_MLX5_VDPA_PMD) += mlx5_vdpa_event.c
> SRCS-$(CONFIG_RTE_LIBRTE_MLX5_VDPA_PMD) += mlx5_vdpa_virtq.c
> SRCS-$(CONFIG_RTE_LIBRTE_MLX5_VDPA_PMD) += mlx5_vdpa_steer.c
> +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_VDPA_PMD) += mlx5_vdpa_lm.c
>
>
> # Basic CFLAGS.
> diff --git a/drivers/vdpa/mlx5/meson.build b/drivers/vdpa/mlx5/meson.build
> index 2849178..2e521b8 100644
> --- a/drivers/vdpa/mlx5/meson.build
> +++ b/drivers/vdpa/mlx5/meson.build
> @@ -16,6 +16,7 @@ sources = files(
> 'mlx5_vdpa_event.c',
> 'mlx5_vdpa_virtq.c',
> 'mlx5_vdpa_steer.c',
> + 'mlx5_vdpa_lm.c',
> )
> cflags_options = [
> '-std=c11',
> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
> index 71189c4..4ce0ba0 100644
> --- a/drivers/vdpa/mlx5/mlx5_vdpa.c
> +++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
> @@ -19,7 +19,8 @@
> (1ULL << VIRTIO_F_ANY_LAYOUT) | \
> (1ULL << VIRTIO_NET_F_MQ) | \
> (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
> - (1ULL << VIRTIO_F_ORDER_PLATFORM))
> + (1ULL << VIRTIO_F_ORDER_PLATFORM) | \
> + (1ULL << VHOST_F_LOG_ALL))
>
> #define MLX5_VDPA_PROTOCOL_FEATURES \
> ((1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
> @@ -127,6 +128,45 @@
> return mlx5_vdpa_virtq_enable(virtq, state);
> }
>
> +static int
> +mlx5_vdpa_features_set(int vid)
> +{
> + int did = rte_vhost_get_vdpa_device_id(vid);
> + struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
> + uint64_t log_base, log_size;
> + uint64_t features;
> + int ret;
> +
> + if (priv == NULL) {
> + DRV_LOG(ERR, "Invalid device id: %d.", did);
> + return -EINVAL;
> + }
> + ret = rte_vhost_get_negotiated_features(vid, &features);
> + if (ret) {
> + DRV_LOG(ERR, "Failed to get negotiated features.");
> + return ret;
> + }
> + if (RTE_VHOST_NEED_LOG(features)) {
> + ret = rte_vhost_get_log_base(vid, &log_base, &log_size);
> + if (ret) {
> + DRV_LOG(ERR, "Failed to get log base.");
> + return ret;
> + }
> + ret = mlx5_vdpa_dirty_bitmap_set(priv, log_base, log_size);
> + if (ret) {
> + DRV_LOG(ERR, "Failed to set dirty bitmap.");
> + return ret;
> + }
> + DRV_LOG(INFO, "mlx5 vdpa: enabling dirty logging...");
> + ret = mlx5_vdpa_logging_enable(priv, 1);
> + if (ret) {
> + DRV_LOG(ERR, "Failed t enable dirty logging.");
> + return ret;
> + }
> + }
> + return 0;
> +}
> +
> static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
> .get_queue_num = mlx5_vdpa_get_queue_num,
> .get_features = mlx5_vdpa_get_vdpa_features,
> @@ -134,7 +174,7 @@
> .dev_conf = NULL,
> .dev_close = NULL,
> .set_vring_state = mlx5_vdpa_set_vring_state,
> - .set_features = NULL,
> + .set_features = mlx5_vdpa_features_set,
> .migration_done = NULL,
> .get_vfio_group_fd = NULL,
> .get_vfio_device_fd = NULL,
> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
> index af78ea1..70264e4 100644
> --- a/drivers/vdpa/mlx5/mlx5_vdpa.h
> +++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
> @@ -244,4 +244,59 @@ int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
> */
> int mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv);
>
> +/**
> + * Enable\Disable live migration logging.
> + *
> + * @param[in] priv
> + * The vdpa driver private structure.
> + * @param[in] enable
> + * Set for enable, unset for disable.
> + *
> + * @return
> + * 0 on success, a negative value otherwise.
> + */
> +int mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable);
> +
> +/**
> + * Set dirty bitmap logging to allow live migration.
> + *
> + * @param[in] priv
> + * The vdpa driver private structure.
> + * @param[in] log_base
> + * Vhost log base.
> + * @param[in] log_size
> + * Vhost log size.
> + *
> + * @return
> + * 0 on success, a negative value otherwise.
> + */
> +int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
> + uint64_t log_size);
> +
> +/**
> + * Log all virtqs information for live migration.
> + *
> + * @param[in] priv
> + * The vdpa driver private structure.
> + *
> + * @return
> + * 0 on success, a negative value otherwise.
> + */
> +int mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv);
> +
> +/**
> + * Modify virtq state to be ready or suspend.
> + *
> + * @param[in] virtq
> + * The vdpa driver private virtq structure.
> + * @param[in] state
> + * Set for ready, otherwise suspend.
> + *
> + * @return
> + * 0 on success, a negative value otherwise.
> + */
> +int mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state);
> +
> #endif /* RTE_PMD_MLX5_VDPA_H_ */
> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
> new file mode 100644
> index 0000000..cfeec5f
> --- /dev/null
> +++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
> @@ -0,0 +1,130 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2019 Mellanox Technologies, Ltd
> + */
> +#include <rte_malloc.h>
> +#include <rte_errno.h>
> +
> +#include "mlx5_vdpa_utils.h"
> +#include "mlx5_vdpa.h"
> +
> +
> +int
> +mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable)
> +{
> + struct mlx5_devx_virtq_attr attr = {
> + .type = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE,
> + .dirty_bitmap_dump_enable = enable,
> + };
> + struct mlx5_vdpa_virtq *virtq;
> +
> + SLIST_FOREACH(virtq, &priv->virtq_list, next) {
> + attr.queue_index = virtq->index;
> + if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
> + DRV_LOG(ERR, "Failed to modify virtq %d logging.",
> + virtq->index);
> + return -1;
> + }
> + }
> + return 0;
> +}
> +
> +int
> +mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
> + uint64_t log_size)
> +{
> + struct mlx5_devx_mkey_attr mkey_attr = {
> + .addr = (uintptr_t)log_base,
> + .size = log_size,
> + .pd = priv->pdn,
> + .pg_access = 1,
> + .klm_array = NULL,
> + .klm_num = 0,
> + };
> + struct mlx5_devx_virtq_attr attr = {
> + .type = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS,
> + .dirty_bitmap_addr = log_base,
> + .dirty_bitmap_size = log_size,
> + };
> + struct mlx5_vdpa_query_mr *mr = rte_malloc(__func__, sizeof(*mr), 0);
> + struct mlx5_vdpa_virtq *virtq;
> +
> + if (!mr) {
> + DRV_LOG(ERR, "Failed to allocate mem for lm mr.");
> + return -1;
> + }
> + mr->umem = mlx5_glue->devx_umem_reg(priv->ctx,
> + (void *)(uintptr_t)log_base,
> + log_size, IBV_ACCESS_LOCAL_WRITE);
> + if (!mr->umem) {
> + DRV_LOG(ERR, "Failed to register umem for lm mr.");
> + goto err;
> + }
> + mkey_attr.umem_id = mr->umem->umem_id;
> + mr->mkey = mlx5_devx_cmd_mkey_create(priv->ctx, &mkey_attr);
> + if (!mr->mkey) {
> + DRV_LOG(ERR, "Failed to create Mkey for lm.");
> + goto err;
> + }
> + attr.dirty_bitmap_mkey = mr->mkey->id;
> + SLIST_FOREACH(virtq, &priv->virtq_list, next) {
> + attr.queue_index = virtq->index;
> + if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
> + DRV_LOG(ERR, "Failed to modify virtq %d for lm.",
> + virtq->index);
> + goto err;
> + }
> + }
> + mr->is_indirect = 0;
> + SLIST_INSERT_HEAD(&priv->mr_list, mr, next);
> + return 0;
> +err:
> + if (mr->mkey)
> + mlx5_devx_cmd_destroy(mr->mkey);
> + if (mr->umem)
> + mlx5_glue->devx_umem_dereg(mr->umem);
> + rte_free(mr);
> + return -1;
> +}
> +
> +#define MLX5_VDPA_USED_RING_LEN(size) \
> + ((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
> +
> +int
> +mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
> +{
> + struct mlx5_devx_virtq_attr attr = {0};
> + struct mlx5_vdpa_virtq *virtq;
> + uint64_t features;
> + int ret = rte_vhost_get_negotiated_features(priv->vid, &features);
> +
> + if (ret) {
> + DRV_LOG(ERR, "Failed to get negotiated features.");
> + return -1;
> + }
> + if (RTE_VHOST_NEED_LOG(features)) {
> + SLIST_FOREACH(virtq, &priv->virtq_list, next) {
> + ret = mlx5_vdpa_virtq_modify(virtq, 0);
> + if (ret)
> + return -1;
> + if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
> + DRV_LOG(ERR, "Failed to query virtq %d.",
> + virtq->index);
> + return -1;
> + }
> + DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx="
> + "%d, hw_used_index=%d", priv->vid, virtq->index,
> + attr.hw_available_index, attr.hw_used_index);
> + ret = rte_vhost_set_vring_base(priv->vid, virtq->index,
> + attr.hw_available_index,
> + attr.hw_used_index);
> + if (ret) {
> + DRV_LOG(ERR, "Failed to set virtq %d base.",
> + virtq->index);
> + return -1;
> + }
> + rte_vhost_log_used_vring(priv->vid, virtq->index, 0,
> + MLX5_VDPA_USED_RING_LEN(virtq->vq_size));
> + }
> + }
> + return 0;
> +}
To avoid one more level of indentation, I would do:

	if (!RTE_VHOST_NEED_LOG(features))
		return 0;

Other than that:
Acked-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -112,7 +112,7 @@
priv->features = 0;
}
-static int
+int
mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
{
struct mlx5_devx_virtq_attr attr = {
@@ -253,6 +253,11 @@
if (mlx5_vdpa_virtq_modify(virtq, 1))
goto error;
virtq->enable = 1;
+ virtq->priv = priv;
+ /* Be sure notifications are not missed during configuration. */
+ claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
+ rte_write32(virtq->index, priv->virtq_db_addr);
+ /* Setup doorbell mapping. */
virtq->intr_handle.fd = vq.kickfd;
virtq->intr_handle.type = RTE_INTR_HANDLE_EXT;
if (rte_intr_callback_register(&virtq->intr_handle,
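For illustration, the early-return style suggested in the review above would reshape mlx5_vdpa_lm_log() roughly as follows; this is a sketch derived from the code in this patch, not the code that was actually committed:

	int
	mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
	{
		struct mlx5_devx_virtq_attr attr = {0};
		struct mlx5_vdpa_virtq *virtq;
		uint64_t features;

		if (rte_vhost_get_negotiated_features(priv->vid, &features)) {
			DRV_LOG(ERR, "Failed to get negotiated features.");
			return -1;
		}
		if (!RTE_VHOST_NEED_LOG(features))
			return 0;
		SLIST_FOREACH(virtq, &priv->virtq_list, next) {
			/* Suspend the queue, then read back the HW ring indexes. */
			if (mlx5_vdpa_virtq_modify(virtq, 0) ||
			    mlx5_devx_cmd_query_virtq(virtq->virtq, &attr))
				return -1;
			if (rte_vhost_set_vring_base(priv->vid, virtq->index,
						     attr.hw_available_index,
						     attr.hw_used_index))
				return -1;
			/* Mark the whole used ring dirty for the final pass. */
			rte_vhost_log_used_vring(priv->vid, virtq->index, 0,
					MLX5_VDPA_USED_RING_LEN(virtq->vq_size));
		}
		return 0;
	}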