Re: [PATCH v19 12/30] drm/shmem-helper: Prepare drm_gem_shmem_free() to shrinker addition

From: Boris Brezillon
Date: Thu Jan 25 2024 - 03:02:26 EST


On Fri, 5 Jan 2024 21:46:06 +0300
Dmitry Osipenko <dmitry.osipenko@xxxxxxxxxxxxx> wrote:

> Prepare drm_gem_shmem_free() for the addition of memory shrinker support
> to drm-shmem by adding and using a variant of put_pages() that doesn't
> touch the reservation lock. The reservation lock shouldn't be taken here
> because lockdep would trigger a bogus warning about lock contention with
> the fs_reclaim code paths: that contention can't happen while the GEM
> object is being freed, but lockdep has no way of knowing it.
>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@xxxxxxxxxxxxx>

Reviewed-by: Boris Brezillon <boris.brezillon@xxxxxxxxxxxxx>

> ---
> drivers/gpu/drm/drm_gem_shmem_helper.c | 40 ++++++++++++++------------
> 1 file changed, 21 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 3403700780c3..799a3c5015ad 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -128,6 +128,22 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
>
> +static void
> +drm_gem_shmem_free_pages(struct drm_gem_shmem_object *shmem)
> +{
> +	struct drm_gem_object *obj = &shmem->base;
> +
> +#ifdef CONFIG_X86
> +	if (shmem->map_wc)
> +		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
> +#endif
> +
> +	drm_gem_put_pages(obj, shmem->pages,
> +			  shmem->pages_mark_dirty_on_put,
> +			  shmem->pages_mark_accessed_on_put);
> +	shmem->pages = NULL;
> +}
> +
> /**
> * drm_gem_shmem_free - Free resources associated with a shmem GEM object
> * @shmem: shmem GEM object to free
> @@ -142,8 +158,6 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
>  	if (obj->import_attach) {
>  		drm_prime_gem_destroy(obj, shmem->sgt);
>  	} else {
> -		dma_resv_lock(shmem->base.resv, NULL);
> -
>  		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
>
>  		if (shmem->sgt) {
> @@ -152,13 +166,12 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
>  			sg_free_table(shmem->sgt);
>  			kfree(shmem->sgt);
>  		}
> -		if (shmem->pages)
> -			drm_gem_shmem_put_pages_locked(shmem);
> +		if (shmem->pages &&
> +		    refcount_dec_and_test(&shmem->pages_use_count))
> +			drm_gem_shmem_free_pages(shmem);
>
>  		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
>  		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
> -
> -		dma_resv_unlock(shmem->base.resv);
>  	}
>
>  	drm_gem_object_release(obj);
> @@ -208,21 +221,10 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
>   */
>  void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
>  {
> -	struct drm_gem_object *obj = &shmem->base;
> -
>  	dma_resv_assert_held(shmem->base.resv);
>
> -	if (refcount_dec_and_test(&shmem->pages_use_count)) {
> -#ifdef CONFIG_X86
> -		if (shmem->map_wc)
> -			set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
> -#endif
> -
> -		drm_gem_put_pages(obj, shmem->pages,
> -				  shmem->pages_mark_dirty_on_put,
> -				  shmem->pages_mark_accessed_on_put);
> -		shmem->pages = NULL;
> -	}
> +	if (refcount_dec_and_test(&shmem->pages_use_count))
> +		drm_gem_shmem_free_pages(shmem);
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
>
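
A note for readers skimming the refcount changes: the free path above may drop the last pages_use_count reference without holding the reservation lock because, by the time drm_gem_shmem_free() runs, no other context can still reach the object. Below is a minimal userspace sketch of that "last put tears down without the lock" idiom; the toy_* names and C11 atomics are illustrative stand-ins, not drm-shmem or kernel refcount_t code.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_obj {
	atomic_int pages_use_count;	/* stands in for shmem->pages_use_count */
	void *pages;
};

/* Teardown that runs only for the final reference, so no lock is needed. */
static void toy_free_pages(struct toy_obj *obj)
{
	free(obj->pages);
	obj->pages = NULL;
}

static void toy_put_pages(struct toy_obj *obj)
{
	/* Like refcount_dec_and_test(): true only when the count hits zero. */
	if (atomic_fetch_sub(&obj->pages_use_count, 1) == 1)
		toy_free_pages(obj);
}

int main(void)
{
	struct toy_obj obj = { .pages = malloc(4096) };

	atomic_init(&obj.pages_use_count, 2);

	toy_put_pages(&obj);	/* not the last reference, pages are kept */
	toy_put_pages(&obj);	/* final put: pages freed, no lock taken */

	printf("pages after final put: %p\n", obj.pages);
	return 0;
}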