Re: [RFC v2 PATCH] vdpa_sim: implement .reset_map support

From: Si-Wei Liu
Date: Wed Oct 18 2023 - 19:48:31 EST

On 10/18/2023 1:05 AM, Stefano Garzarella wrote:
On Tue, Oct 17, 2023 at 10:11:33PM -0700, Si-Wei Liu wrote:
RFC only. Not tested on vdpa-sim-blk with user virtual addresses.
Works fine with vdpa-sim-net, which uses physical addresses for mapping.

This patch is based on top of [1].

[1] https://lore.kernel.org/virtualization/1696928580-7520-1-git-send-email-si-wei.liu@xxxxxxxxxx/

Signed-off-by: Si-Wei Liu <si-wei.liu@xxxxxxxxxx>

---
RFC v2:
 - initialize iotlb to passthrough mode in device add

I tested this version and I didn't see any issue ;-)
Great, thank you so much for your help on testing my patch, Stefano!
Just out of curiosity: there is currently no vhost-vdpa backend client implemented in userspace for vdpa-sim-blk, or for any vdpa block device, correct? So there was no vhost-vdpa-specific test that needed to be exercised, right?
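
In case it helps whoever ends up wiring a vhost-vdpa test for the blk simulator, below is a rough, untested sketch of the kind of userspace exercise I have in mind. It assumes a vdpasim_blk instance has already been created and bound to the vhost_vdpa bus driver so that it shows up as /dev/vhost-vdpa-0 (the device node, the IOVA choice and the use of address space 0 are my assumptions, not something this patch provides). It takes ownership, installs one IOTLB mapping via VHOST_IOTLB_MSG_V2, and then closes the fd; as I understand the parent series [1], releasing the device is where the mapping state is supposed to be restored to the default passthrough state via .reset_map.

/*
 * Rough sketch only, not part of this patch: device node name, IOVA
 * and the use of address space 0 are assumptions for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/vhost.h>
#include <linux/vhost_types.h>

int main(void)
{
	struct vhost_msg_v2 msg;
	size_t len = 4096;
	void *buf;
	int fd;

	fd = open("/dev/vhost-vdpa-0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* IOTLB updates are rejected until ownership is taken */
	if (ioctl(fd, VHOST_SET_OWNER, NULL)) {
		perror("VHOST_SET_OWNER");
		return 1;
	}

	/* one anonymous page to expose to the device */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* install a single mapping: iova 0x100000 -> buf, RW, asid 0 */
	memset(&msg, 0, sizeof(msg));
	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.iova = 0x100000;
	msg.iotlb.uaddr = (unsigned long)buf;
	msg.iotlb.size = len;
	msg.iotlb.perm = VHOST_ACCESS_RW;
	msg.iotlb.type = VHOST_IOTLB_UPDATE;
	if (write(fd, &msg, sizeof(msg)) != sizeof(msg))
		perror("VHOST_IOTLB_UPDATE");

	/*
	 * Releasing the device is where, with this series, the iotlb is
	 * expected to go back to the pristine passthrough default.
	 */
	close(fd);
	return 0;
}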

Thanks,
-Siwei

Tested-by: Stefano Garzarella <sgarzare@xxxxxxxxxx>

---
drivers/vdpa/vdpa_sim/vdpa_sim.c | 34 ++++++++++++++++++++++++--------
1 file changed, 26 insertions(+), 8 deletions(-)

diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 76d41058add9..2a0a6042d61d 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -151,13 +151,6 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
                 &vdpasim->iommu_lock);
    }

-    for (i = 0; i < vdpasim->dev_attr.nas; i++) {
-        vhost_iotlb_reset(&vdpasim->iommu[i]);
-        vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
-                      0, VHOST_MAP_RW);
-        vdpasim->iommu_pt[i] = true;
-    }
-
    vdpasim->running = true;
    spin_unlock(&vdpasim->iommu_lock);

@@ -259,8 +252,12 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
    if (!vdpasim->iommu_pt)
        goto err_iommu;

-    for (i = 0; i < vdpasim->dev_attr.nas; i++)
+    for (i = 0; i < vdpasim->dev_attr.nas; i++) {
        vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);
+        vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX, 0,
+                      VHOST_MAP_RW);
+        vdpasim->iommu_pt[i] = true;
+    }

    for (i = 0; i < dev_attr->nvqs; i++)
        vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
@@ -637,6 +634,25 @@ static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
    return ret;
}

+static int vdpasim_reset_map(struct vdpa_device *vdpa, unsigned int asid)
+{
+    struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+    if (asid >= vdpasim->dev_attr.nas)
+        return -EINVAL;
+
+    spin_lock(&vdpasim->iommu_lock);
+    if (vdpasim->iommu_pt[asid])
+        goto out;
+    vhost_iotlb_reset(&vdpasim->iommu[asid]);
+    vhost_iotlb_add_range(&vdpasim->iommu[asid], 0, ULONG_MAX,
+                  0, VHOST_MAP_RW);
+    vdpasim->iommu_pt[asid] = true;
+out:
+    spin_unlock(&vdpasim->iommu_lock);
+    return 0;
+}
+
static int vdpasim_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
{
    struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -759,6 +775,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
    .set_group_asid         = vdpasim_set_group_asid,
    .dma_map                = vdpasim_dma_map,
    .dma_unmap              = vdpasim_dma_unmap,
+    .reset_map              = vdpasim_reset_map,
    .bind_mm        = vdpasim_bind_mm,
    .unbind_mm        = vdpasim_unbind_mm,
    .free                   = vdpasim_free,
@@ -796,6 +813,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
    .get_iova_range         = vdpasim_get_iova_range,
    .set_group_asid         = vdpasim_set_group_asid,
    .set_map                = vdpasim_set_map,
+    .reset_map              = vdpasim_reset_map,
    .bind_mm        = vdpasim_bind_mm,
    .unbind_mm        = vdpasim_unbind_mm,
    .free                   = vdpasim_free,
--
2.39.3