[v3,07/10] vdpa/sfc: add support to get queue notify area info
Checks
Commit Message
From: Vijay Kumar Srivastava <vsrivast@xilinx.com>
Implement the vDPA ops get_notify_area to get the notify
area info of the queue.
Signed-off-by: Vijay Kumar Srivastava <vsrivast@xilinx.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
---
v2:
* Added error log in sfc_vdpa_get_notify_area.
drivers/vdpa/sfc/sfc_vdpa_ops.c | 168 ++++++++++++++++++++++++++++++++++++++--
drivers/vdpa/sfc/sfc_vdpa_ops.h | 2 +
2 files changed, 164 insertions(+), 6 deletions(-)
Comments
Hi Vijay,
> -----Original Message-----
> From: Vijay Srivastava <vijay.srivastava@xilinx.com>
> Sent: Friday, October 29, 2021 10:47 PM
> To: dev@dpdk.org
> Cc: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>;
> andrew.rybchenko@oktetlabs.ru; Vijay Kumar Srivastava <vsrivast@xilinx.com>
> Subject: [PATCH v3 07/10] vdpa/sfc: add support to get queue notify area info
>
> From: Vijay Kumar Srivastava <vsrivast@xilinx.com>
>
> Implement the vDPA ops get_notify_area to get the notify
> area info of the queue.
>
> Signed-off-by: Vijay Kumar Srivastava <vsrivast@xilinx.com>
> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> ---
> v2:
> * Added error log in sfc_vdpa_get_notify_area.
>
> drivers/vdpa/sfc/sfc_vdpa_ops.c | 168 ++++++++++++++++++++++++++++++++++++++--
> drivers/vdpa/sfc/sfc_vdpa_ops.h | 2 +
> 2 files changed, 164 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.c b/drivers/vdpa/sfc/sfc_vdpa_ops.c
> index de1c81a..774d73e 100644
> --- a/drivers/vdpa/sfc/sfc_vdpa_ops.c
> +++ b/drivers/vdpa/sfc/sfc_vdpa_ops.c
> @@ -3,6 +3,8 @@
> * Copyright(c) 2020-2021 Xilinx, Inc.
> */
>
> +#include <pthread.h>
> +#include <unistd.h>
> #include <sys/ioctl.h>
>
> #include <rte_errno.h>
> @@ -537,6 +539,67 @@
> return 0;
> }
>
> +static void *
> +sfc_vdpa_notify_ctrl(void *arg)
> +{
> + struct sfc_vdpa_ops_data *ops_data;
> + int vid;
> +
> + ops_data = arg;
> + if (ops_data == NULL)
> + return NULL;
> +
> + sfc_vdpa_adapter_lock(ops_data->dev_handle);
> +
> + vid = ops_data->vid;
> +
> + if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
> + sfc_vdpa_info(ops_data->dev_handle,
> + "vDPA (%s): Notifier could not get configured",
> + ops_data->vdpa_dev->device->name);
> +
> + sfc_vdpa_adapter_unlock(ops_data->dev_handle);
> +
> + return NULL;
> +}
> +
> +static int
> +sfc_vdpa_setup_notify_ctrl(int vid)
> +{
> + int ret;
> + struct rte_vdpa_device *vdpa_dev;
> + struct sfc_vdpa_ops_data *ops_data;
> +
> + vdpa_dev = rte_vhost_get_vdpa_device(vid);
> +
> + ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
> + if (ops_data == NULL) {
> + sfc_vdpa_err(ops_data->dev_handle,
> + "invalid vDPA device : %p, vid : %d",
> + vdpa_dev, vid);
> + return -1;
> + }
Why not use struct sfc_vdpa_ops_data * as the input param rather
than vid, then use ops_data->vdpa_dev to get vdpa_dev?
As ops_data is checked as non-NULL before the func, it will make
things easier.
Thanks,
Chenbo
> +
> + ops_data->is_notify_thread_started = false;
> +
> + /*
> + * Use rte_vhost_host_notifier_ctrl in a thread to avoid
> + * dead lock scenario when multiple VFs are used in single vdpa
> + * application and multiple VFs are passed to a single VM.
> + */
> + ret = pthread_create(&ops_data->notify_tid, NULL,
> + sfc_vdpa_notify_ctrl, ops_data);
> + if (ret != 0) {
> + sfc_vdpa_err(ops_data->dev_handle,
> + "failed to create notify_ctrl thread: %s",
> + rte_strerror(ret));
> + return -1;
> + }
> + ops_data->is_notify_thread_started = true;
> +
> + return 0;
> +}
> +
> static int
> sfc_vdpa_dev_config(int vid)
> {
> @@ -570,18 +633,19 @@
> if (rc != 0)
> goto fail_vdpa_start;
>
> - sfc_vdpa_adapter_unlock(ops_data->dev_handle);
> + rc = sfc_vdpa_setup_notify_ctrl(vid);
> + if (rc != 0)
> + goto fail_vdpa_notify;
>
> - sfc_vdpa_log_init(ops_data->dev_handle, "vhost notifier ctrl");
> - if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
> - sfc_vdpa_info(ops_data->dev_handle,
> - "vDPA (%s): software relay for notify is used.",
> - vdpa_dev->device->name);
> + sfc_vdpa_adapter_unlock(ops_data->dev_handle);
>
> sfc_vdpa_log_init(ops_data->dev_handle, "done");
>
> return 0;
>
> +fail_vdpa_notify:
> + sfc_vdpa_stop(ops_data);
> +
> fail_vdpa_start:
> sfc_vdpa_close(ops_data);
>
> @@ -594,6 +658,7 @@
> static int
> sfc_vdpa_dev_close(int vid)
> {
> + int ret;
> struct rte_vdpa_device *vdpa_dev;
> struct sfc_vdpa_ops_data *ops_data;
>
> @@ -608,6 +673,23 @@
> }
>
> sfc_vdpa_adapter_lock(ops_data->dev_handle);
> + if (ops_data->is_notify_thread_started == true) {
> + void *status;
> + ret = pthread_cancel(ops_data->notify_tid);
> + if (ret != 0) {
> + sfc_vdpa_err(ops_data->dev_handle,
> + "failed to cancel notify_ctrl thread: %s",
> + rte_strerror(ret));
> + }
> +
> + ret = pthread_join(ops_data->notify_tid, &status);
> + if (ret != 0) {
> + sfc_vdpa_err(ops_data->dev_handle,
> + "failed to join terminated notify_ctrl thread: %s",
> + rte_strerror(ret));
> + }
> + }
> + ops_data->is_notify_thread_started = false;
>
> sfc_vdpa_stop(ops_data);
> sfc_vdpa_close(ops_data);
> @@ -658,6 +740,79 @@
> return vfio_dev_fd;
> }
>
> +static int
> +sfc_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
> +{
> + int ret;
> + efx_nic_t *nic;
> + int vfio_dev_fd;
> + efx_rc_t rc;
> + unsigned int bar_offset;
> + struct rte_vdpa_device *vdpa_dev;
> + struct sfc_vdpa_ops_data *ops_data;
> + struct vfio_region_info reg = { .argsz = sizeof(reg) };
> + const efx_nic_cfg_t *encp;
> + int max_vring_cnt;
> + int64_t len;
> + void *dev;
> +
> + vdpa_dev = rte_vhost_get_vdpa_device(vid);
> +
> + ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
> + if (ops_data == NULL)
> + return -1;
> +
> + dev = ops_data->dev_handle;
> +
> + vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;
> + max_vring_cnt =
> + (sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count * 2);
> +
> + nic = sfc_vdpa_adapter_by_dev_handle(ops_data->dev_handle)->nic;
> + encp = efx_nic_cfg_get(nic);
> +
> + if (qid >= max_vring_cnt) {
> + sfc_vdpa_err(dev, "invalid qid : %d", qid);
> + return -1;
> + }
> +
> + if (ops_data->vq_cxt[qid].enable != B_TRUE) {
> + sfc_vdpa_err(dev, "vq is not enabled");
> + return -1;
> + }
> +
> + rc = efx_virtio_get_doorbell_offset(ops_data->vq_cxt[qid].vq,
> + &bar_offset);
> + if (rc != 0) {
> + sfc_vdpa_err(dev, "failed to get doorbell offset: %s",
> + rte_strerror(rc));
> + return rc;
> + }
> +
> + reg.index = sfc_vdpa_adapter_by_dev_handle(dev)->mem_bar.esb_rid;
> + ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
> + if (ret != 0) {
> + sfc_vdpa_err(dev, "could not get device region info: %s",
> + strerror(errno));
> + return ret;
> + }
> +
> + *offset = reg.offset + bar_offset;
> +
> + len = (1U << encp->enc_vi_window_shift) / 2;
> + if (len >= sysconf(_SC_PAGESIZE)) {
> + *size = sysconf(_SC_PAGESIZE);
> + } else {
> + sfc_vdpa_err(dev, "invalid VI window size : 0x%" PRIx64, len);
> + return -1;
> + }
> +
> + sfc_vdpa_info(dev, "vDPA ops get_notify_area :: offset : 0x%" PRIx64,
> + *offset);
> +
> + return 0;
> +}
> +
> static struct rte_vdpa_dev_ops sfc_vdpa_ops = {
> .get_queue_num = sfc_vdpa_get_queue_num,
> .get_features = sfc_vdpa_get_features,
> @@ -667,6 +822,7 @@
> .set_vring_state = sfc_vdpa_set_vring_state,
> .set_features = sfc_vdpa_set_features,
> .get_vfio_device_fd = sfc_vdpa_get_vfio_device_fd,
> + .get_notify_area = sfc_vdpa_get_notify_area,
> };
>
> struct sfc_vdpa_ops_data *
> diff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.h b/drivers/vdpa/sfc/sfc_vdpa_ops.h
> index 8d553c5..f7523ef 100644
> --- a/drivers/vdpa/sfc/sfc_vdpa_ops.h
> +++ b/drivers/vdpa/sfc/sfc_vdpa_ops.h
> @@ -50,6 +50,8 @@ struct sfc_vdpa_ops_data {
> struct rte_vdpa_device *vdpa_dev;
> enum sfc_vdpa_context vdpa_context;
> enum sfc_vdpa_state state;
> + pthread_t notify_tid;
> + bool is_notify_thread_started;
>
> uint64_t dev_features;
> uint64_t drv_features;
> --
> 1.8.3.1
Hi Chenbo,
>-----Original Message-----
>From: Xia, Chenbo <chenbo.xia@intel.com>
>Sent: Tuesday, November 2, 2021 1:05 PM
>To: Vijay Kumar Srivastava <vsrivast@xilinx.com>; dev@dpdk.org
>Cc: maxime.coquelin@redhat.com; andrew.rybchenko@oktetlabs.ru; Vijay
>Kumar Srivastava <vsrivast@xilinx.com>
>Subject: RE: [PATCH v3 07/10] vdpa/sfc: add support to get queue notify area
>info
>
>Hi Vijay,
>
>> -----Original Message-----
>> From: Vijay Srivastava <vijay.srivastava@xilinx.com>
>> Sent: Friday, October 29, 2021 10:47 PM
>> To: dev@dpdk.org
>> Cc: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>;
>> andrew.rybchenko@oktetlabs.ru; Vijay Kumar Srivastava
>> <vsrivast@xilinx.com>
>> Subject: [PATCH v3 07/10] vdpa/sfc: add support to get queue notify
>> area info
>>
>> From: Vijay Kumar Srivastava <vsrivast@xilinx.com>
>>
>> Implement the vDPA ops get_notify_area to get the notify area info of
>> the queue.
>>
>> Signed-off-by: Vijay Kumar Srivastava <vsrivast@xilinx.com>
>> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
>> ---
[SNIP]
>> +static int
>> +sfc_vdpa_setup_notify_ctrl(int vid)
>> +{
>> + int ret;
>> + struct rte_vdpa_device *vdpa_dev;
>> + struct sfc_vdpa_ops_data *ops_data;
>> +
>> + vdpa_dev = rte_vhost_get_vdpa_device(vid);
>> +
>> + ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
>> + if (ops_data == NULL) {
>> + sfc_vdpa_err(ops_data->dev_handle,
>> + "invalid vDPA device : %p, vid : %d",
>> + vdpa_dev, vid);
>> + return -1;
>> + }
>
>Why not use struct sfc_vdpa_ops_data * as the input param rather than vid,
>then use ops_data->vdpa_dev to get vdpa_dev?
>
>As ops_data is checked as non-NULL before the func, it will make things easier.
>
Yes. ops_data can be used as input param.
I will include this change.
@@ -3,6 +3,8 @@
* Copyright(c) 2020-2021 Xilinx, Inc.
*/
+#include <pthread.h>
+#include <unistd.h>
#include <sys/ioctl.h>
#include <rte_errno.h>
@@ -537,6 +539,67 @@
return 0;
}
+static void *
+sfc_vdpa_notify_ctrl(void *arg)
+{
+ struct sfc_vdpa_ops_data *ops_data;
+ int vid;
+
+ ops_data = arg;
+ if (ops_data == NULL)
+ return NULL;
+
+ sfc_vdpa_adapter_lock(ops_data->dev_handle);
+
+ vid = ops_data->vid;
+
+ if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
+ sfc_vdpa_info(ops_data->dev_handle,
+ "vDPA (%s): Notifier could not get configured",
+ ops_data->vdpa_dev->device->name);
+
+ sfc_vdpa_adapter_unlock(ops_data->dev_handle);
+
+ return NULL;
+}
+
+static int
+sfc_vdpa_setup_notify_ctrl(int vid)
+{
+ int ret;
+ struct rte_vdpa_device *vdpa_dev;
+ struct sfc_vdpa_ops_data *ops_data;
+
+ vdpa_dev = rte_vhost_get_vdpa_device(vid);
+
+ ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
+ if (ops_data == NULL) {
+ sfc_vdpa_err(ops_data->dev_handle,
+ "invalid vDPA device : %p, vid : %d",
+ vdpa_dev, vid);
+ return -1;
+ }
+
+ ops_data->is_notify_thread_started = false;
+
+ /*
+ * Use rte_vhost_host_notifier_ctrl in a thread to avoid
+ * dead lock scenario when multiple VFs are used in single vdpa
+ * application and multiple VFs are passed to a single VM.
+ */
+ ret = pthread_create(&ops_data->notify_tid, NULL,
+ sfc_vdpa_notify_ctrl, ops_data);
+ if (ret != 0) {
+ sfc_vdpa_err(ops_data->dev_handle,
+ "failed to create notify_ctrl thread: %s",
+ rte_strerror(ret));
+ return -1;
+ }
+ ops_data->is_notify_thread_started = true;
+
+ return 0;
+}
+
static int
sfc_vdpa_dev_config(int vid)
{
@@ -570,18 +633,19 @@
if (rc != 0)
goto fail_vdpa_start;
- sfc_vdpa_adapter_unlock(ops_data->dev_handle);
+ rc = sfc_vdpa_setup_notify_ctrl(vid);
+ if (rc != 0)
+ goto fail_vdpa_notify;
- sfc_vdpa_log_init(ops_data->dev_handle, "vhost notifier ctrl");
- if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
- sfc_vdpa_info(ops_data->dev_handle,
- "vDPA (%s): software relay for notify is used.",
- vdpa_dev->device->name);
+ sfc_vdpa_adapter_unlock(ops_data->dev_handle);
sfc_vdpa_log_init(ops_data->dev_handle, "done");
return 0;
+fail_vdpa_notify:
+ sfc_vdpa_stop(ops_data);
+
fail_vdpa_start:
sfc_vdpa_close(ops_data);
@@ -594,6 +658,7 @@
static int
sfc_vdpa_dev_close(int vid)
{
+ int ret;
struct rte_vdpa_device *vdpa_dev;
struct sfc_vdpa_ops_data *ops_data;
@@ -608,6 +673,23 @@
}
sfc_vdpa_adapter_lock(ops_data->dev_handle);
+ if (ops_data->is_notify_thread_started == true) {
+ void *status;
+ ret = pthread_cancel(ops_data->notify_tid);
+ if (ret != 0) {
+ sfc_vdpa_err(ops_data->dev_handle,
+ "failed to cancel notify_ctrl thread: %s",
+ rte_strerror(ret));
+ }
+
+ ret = pthread_join(ops_data->notify_tid, &status);
+ if (ret != 0) {
+ sfc_vdpa_err(ops_data->dev_handle,
+ "failed to join terminated notify_ctrl thread: %s",
+ rte_strerror(ret));
+ }
+ }
+ ops_data->is_notify_thread_started = false;
sfc_vdpa_stop(ops_data);
sfc_vdpa_close(ops_data);
@@ -658,6 +740,79 @@
return vfio_dev_fd;
}
+static int
+sfc_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
+{
+ int ret;
+ efx_nic_t *nic;
+ int vfio_dev_fd;
+ efx_rc_t rc;
+ unsigned int bar_offset;
+ struct rte_vdpa_device *vdpa_dev;
+ struct sfc_vdpa_ops_data *ops_data;
+ struct vfio_region_info reg = { .argsz = sizeof(reg) };
+ const efx_nic_cfg_t *encp;
+ int max_vring_cnt;
+ int64_t len;
+ void *dev;
+
+ vdpa_dev = rte_vhost_get_vdpa_device(vid);
+
+ ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
+ if (ops_data == NULL)
+ return -1;
+
+ dev = ops_data->dev_handle;
+
+ vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;
+ max_vring_cnt =
+ (sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count * 2);
+
+ nic = sfc_vdpa_adapter_by_dev_handle(ops_data->dev_handle)->nic;
+ encp = efx_nic_cfg_get(nic);
+
+ if (qid >= max_vring_cnt) {
+ sfc_vdpa_err(dev, "invalid qid : %d", qid);
+ return -1;
+ }
+
+ if (ops_data->vq_cxt[qid].enable != B_TRUE) {
+ sfc_vdpa_err(dev, "vq is not enabled");
+ return -1;
+ }
+
+ rc = efx_virtio_get_doorbell_offset(ops_data->vq_cxt[qid].vq,
+ &bar_offset);
+ if (rc != 0) {
+ sfc_vdpa_err(dev, "failed to get doorbell offset: %s",
+ rte_strerror(rc));
+ return rc;
+ }
+
+ reg.index = sfc_vdpa_adapter_by_dev_handle(dev)->mem_bar.esb_rid;
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
+ if (ret != 0) {
+ sfc_vdpa_err(dev, "could not get device region info: %s",
+ strerror(errno));
+ return ret;
+ }
+
+ *offset = reg.offset + bar_offset;
+
+ len = (1U << encp->enc_vi_window_shift) / 2;
+ if (len >= sysconf(_SC_PAGESIZE)) {
+ *size = sysconf(_SC_PAGESIZE);
+ } else {
+ sfc_vdpa_err(dev, "invalid VI window size : 0x%" PRIx64, len);
+ return -1;
+ }
+
+ sfc_vdpa_info(dev, "vDPA ops get_notify_area :: offset : 0x%" PRIx64,
+ *offset);
+
+ return 0;
+}
+
static struct rte_vdpa_dev_ops sfc_vdpa_ops = {
.get_queue_num = sfc_vdpa_get_queue_num,
.get_features = sfc_vdpa_get_features,
@@ -667,6 +822,7 @@
.set_vring_state = sfc_vdpa_set_vring_state,
.set_features = sfc_vdpa_set_features,
.get_vfio_device_fd = sfc_vdpa_get_vfio_device_fd,
+ .get_notify_area = sfc_vdpa_get_notify_area,
};
struct sfc_vdpa_ops_data *
@@ -50,6 +50,8 @@ struct sfc_vdpa_ops_data {
struct rte_vdpa_device *vdpa_dev;
enum sfc_vdpa_context vdpa_context;
enum sfc_vdpa_state state;
+ pthread_t notify_tid;
+ bool is_notify_thread_started;
uint64_t dev_features;
uint64_t drv_features;