@@ -106,13 +106,34 @@
return 0;
}
+static int
+mlx5_vdpa_set_vring_state(int vid, int vring, int state)
+{
+ int did = rte_vhost_get_vdpa_device_id(vid);
+ struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+ struct mlx5_vdpa_virtq *virtq = NULL;
+
+ if (priv == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d.", did);
+ return -EINVAL;
+ }
+ SLIST_FOREACH(virtq, &priv->virtq_list, next)
+ if (virtq->index == vring)
+ break;
+ if (!virtq) {
+ DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
+ return -EINVAL;
+ }
+ return mlx5_vdpa_virtq_enable(virtq, state);
+}
+
static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
.get_queue_num = mlx5_vdpa_get_queue_num,
.get_features = mlx5_vdpa_get_vdpa_features,
.get_protocol_features = mlx5_vdpa_get_protocol_features,
.dev_conf = NULL,
.dev_close = NULL,
- .set_vring_state = NULL,
+ .set_vring_state = mlx5_vdpa_set_vring_state,
.set_features = NULL,
.migration_done = NULL,
.get_vfio_group_fd = NULL,
@@ -64,8 +64,10 @@ struct mlx5_vdpa_query_mr {
struct mlx5_vdpa_virtq {
SLIST_ENTRY(mlx5_vdpa_virtq) next;
+ uint8_t enable;
uint16_t index;
uint16_t vq_size;
+ struct mlx5_vdpa_priv *priv;
struct mlx5_devx_obj *virtq;
struct mlx5_vdpa_event_qp eqp;
struct {
@@ -207,6 +209,19 @@ int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);
/**
+ * Enable/Disable virtq.
+ *
+ * @param[in] virtq
+ * The vdpa driver private virtq structure.
+ * @param[in] enable
+ * Set to enable, otherwise disable.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable);
+
+/**
* Unset steering and release all its related resources- stop traffic.
*
* @param[in] priv
@@ -73,7 +73,7 @@
}
#define MLX5_VDPA_DEFAULT_RQT_SIZE 512
-static int __rte_unused
+static int
mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
{
struct mlx5_vdpa_virtq *virtq;
@@ -91,7 +91,8 @@
return -ENOMEM;
}
SLIST_FOREACH(virtq, &priv->virtq_list, next) {
- if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
+ if (is_virtq_recvq(virtq->index, priv->nr_virtqs) &&
+ virtq->enable) {
attr->rq_list[i] = virtq->virtq->id;
i++;
}
@@ -116,6 +117,23 @@
return ret;
}
+int
+mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable)
+{
+ struct mlx5_vdpa_priv *priv = virtq->priv;
+ int ret = 0;
+
+ if (virtq->enable == !!enable)
+ return 0;
+ virtq->enable = !!enable;
+ if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
+ ret = mlx5_vdpa_rqt_prepare(priv);
+ if (ret)
+ virtq->enable = !enable;
+ }
+ return ret;
+}
+
static int __rte_unused
mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
{
@@ -15,13 +15,13 @@
static int
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
- int i;
+ unsigned int i;
if (virtq->virtq) {
claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
virtq->virtq = NULL;
}
- for (i = 0; i < 3; ++i) {
+ for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
if (virtq->umems[i].obj)
claim_zero(mlx5_glue->devx_umem_dereg
(virtq->umems[i].obj));
@@ -60,6 +60,19 @@
priv->features = 0;
}
+static int
+mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
+{
+ struct mlx5_devx_virtq_attr attr = {
+ .type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
+ .state = state ? MLX5_VIRTQ_STATE_RDY :
+ MLX5_VIRTQ_STATE_SUSPEND,
+ .queue_index = virtq->index,
+ };
+
+ return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
+}
+
static uint64_t
mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
{
@@ -86,7 +99,7 @@
struct mlx5_devx_virtq_attr attr = {0};
uint64_t gpa;
int ret;
- int i;
+ unsigned int i;
uint16_t last_avail_idx;
uint16_t last_used_idx;
@@ -125,7 +138,7 @@
" need event QPs and event mechanism.", index);
}
/* Setup 3 UMEMs for each virtq. */
- for (i = 0; i < 3; ++i) {
+ for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
priv->caps.umems[i].b;
virtq->umems[i].buf = rte_zmalloc(__func__,
@@ -182,8 +195,12 @@
attr.tis_id = priv->tis->id;
attr.queue_index = index;
virtq->virtq = mlx5_devx_cmd_create_virtq(priv->ctx, &attr);
+ virtq->priv = priv;
if (!virtq->virtq)
goto error;
+ if (mlx5_vdpa_virtq_modify(virtq, 1))
+ goto error;
+ virtq->enable = 1;
return 0;
error:
mlx5_vdpa_virtq_unset(virtq);