[PATCHv3 3/3] drm/vgem: Enable dmabuf import interfaces

From: Laura Abbott
Date: Tue May 02 2017 - 13:02:43 EST


Enable the GEM dma-buf import interfaces in addition to the export
interfaces. This lets vgem be used as a test source for other allocators
(e.g. Ion).
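
For reference, a minimal userspace sketch of the import path this
enables, using libdrm (the device path, helper name, and the source of
dmabuf_fd are illustrative; any dma-buf exporter will do):

  #include <fcntl.h>
  #include <stdint.h>
  #include <unistd.h>
  #include <xf86drm.h>

  /* Import a dma-buf fd (e.g. from an Ion heap) into vgem. */
  int import_into_vgem(int dmabuf_fd)
  {
          int fd = open("/dev/dri/card0", O_RDWR); /* assumed vgem node */
          uint32_t handle;

          if (fd < 0)
                  return -1;

          /* Exercises prime_fd_to_handle -> vgem_prime_import_sg_table */
          if (drmPrimeFDToHandle(fd, dmabuf_fd, &handle)) {
                  close(fd);
                  return -1;
          }

          /* handle can now be mapped via the dumb-map path, fenced, etc. */
          return fd;
  }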

Cc: intel-gfx@xxxxxxxxxxxxxxxxxxxxx
Reviewed-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Laura Abbott <labbott@xxxxxxxxxx>
---
v3: Minor fixes suggested by Chris Wilson
---
drivers/gpu/drm/vgem/vgem_drv.c | 133 +++++++++++++++++++++++++++++++---------
drivers/gpu/drm/vgem/vgem_drv.h | 2 +
2 files changed, 106 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 727eed2..c254c80 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -34,6 +34,9 @@
 #include <linux/ramfs.h>
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
+
+#include <drm/drmP.h>
+
 #include "vgem_drv.h"
 
 #define DRIVER_NAME "vgem"
@@ -46,6 +49,11 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
 
+	drm_free_large(vgem_obj->pages);
+
+	if (obj->import_attach)
+		drm_prime_gem_destroy(obj, vgem_obj->table);
+
 	drm_gem_object_release(obj);
 	kfree(vgem_obj);
 }
@@ -56,26 +64,49 @@ static int vgem_gem_fault(struct vm_fault *vmf)
 	struct drm_vgem_gem_object *obj = vma->vm_private_data;
 	/* We don't use vmf->pgoff since that has the fake offset */
 	unsigned long vaddr = vmf->address;
-	struct page *page;
-
-	page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
-				       (vaddr - vma->vm_start) >> PAGE_SHIFT);
-	if (!IS_ERR(page)) {
-		vmf->page = page;
-		return 0;
-	} else switch (PTR_ERR(page)) {
-		case -ENOSPC:
-		case -ENOMEM:
-			return VM_FAULT_OOM;
-		case -EBUSY:
-			return VM_FAULT_RETRY;
-		case -EFAULT:
-		case -EINVAL:
-			return VM_FAULT_SIGBUS;
-		default:
-			WARN_ON_ONCE(PTR_ERR(page));
-			return VM_FAULT_SIGBUS;
+	int ret;
+	loff_t num_pages;
+	pgoff_t page_offset;
+	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
+
+	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
+
+	if (page_offset >= num_pages)
+		return VM_FAULT_SIGBUS;
+
+	if (obj->pages) {
+		get_page(obj->pages[page_offset]);
+		vmf->page = obj->pages[page_offset];
+		ret = 0;
+	} else {
+		struct page *page;
+
+		page = shmem_read_mapping_page(
+					file_inode(obj->base.filp)->i_mapping,
+					page_offset);
+		if (!IS_ERR(page)) {
+			vmf->page = page;
+			ret = 0;
+		} else switch (PTR_ERR(page)) {
+			case -ENOSPC:
+			case -ENOMEM:
+				ret = VM_FAULT_OOM;
+				break;
+			case -EBUSY:
+				ret = VM_FAULT_RETRY;
+				break;
+			case -EFAULT:
+			case -EINVAL:
+				ret = VM_FAULT_SIGBUS;
+				break;
+			default:
+				WARN_ON(PTR_ERR(page));
+				ret = VM_FAULT_SIGBUS;
+				break;
+		}
+
 	}
+	return ret;
 }
 
 static const struct vm_operations_struct vgem_gem_vm_ops = {
@@ -112,12 +143,8 @@ static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
 	kfree(vfile);
 }
 
-/* ioctls */
-
-static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
-					      struct drm_file *file,
-					      unsigned int *handle,
-					      unsigned long size)
+static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
+						unsigned long size)
 {
 	struct drm_vgem_gem_object *obj;
 	int ret;
@@ -127,8 +154,31 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
 		return ERR_PTR(-ENOMEM);
 
 	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
-	if (ret)
-		goto err_free;
+	if (ret) {
+		kfree(obj);
+		return ERR_PTR(ret);
+	}
+
+	return obj;
+}
+
+static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
+{
+	drm_gem_object_release(&obj->base);
+	kfree(obj);
+}
+
+static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
+					      struct drm_file *file,
+					      unsigned int *handle,
+					      unsigned long size)
+{
+	struct drm_vgem_gem_object *obj;
+	int ret;
+
+	obj = __vgem_gem_create(dev, size);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
 
 	ret = drm_gem_handle_create(file, &obj->base, handle);
 	drm_gem_object_unreference_unlocked(&obj->base);
@@ -137,9 +187,8 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,

 	return &obj->base;
 
-err_free:
-	kfree(obj);
 err:
+	__vgem_gem_destroy(obj);
 	return ERR_PTR(ret);
 }
 
@@ -256,6 +305,29 @@ static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
 	return st;
 }
 
+static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
+			struct dma_buf_attachment *attach, struct sg_table *sg)
+{
+	struct drm_vgem_gem_object *obj;
+	int npages;
+
+	obj = __vgem_gem_create(dev, attach->dmabuf->size);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
+
+	obj->table = sg;
+	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!obj->pages) {
+		__vgem_gem_destroy(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
+					npages);
+	return &obj->base;
+}
+
 static void *vgem_prime_vmap(struct drm_gem_object *obj)
 {
 	long n_pages = obj->size >> PAGE_SHIFT;
@@ -314,8 +386,11 @@ static struct drm_driver vgem_driver = {
 	.dumb_map_offset = vgem_gem_dumb_map,
 
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_pin = vgem_prime_pin,
+	.gem_prime_import = drm_gem_prime_import_platform,
 	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_import_sg_table = vgem_prime_import_sg_table,
 	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
 	.gem_prime_vmap = vgem_prime_vmap,
 	.gem_prime_vunmap = vgem_prime_vunmap,
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index cb59c7a..1aae014 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -43,6 +43,8 @@ struct vgem_file {
 #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
 struct drm_vgem_gem_object {
 	struct drm_gem_object base;
+	struct page **pages;
+	struct sg_table *table;
 };
 
 int vgem_fence_open(struct vgem_file *file);
--
2.7.4