[PATCH 1/2] zsmalloc: require GFP in zs_malloc()

From: Sergey Senozhatsky
Date: Thu Apr 28 2016 - 11:22:47 EST


Pass GFP flags to zs_malloc() instead of using a fixed set of flags
supplied during pool creation. This makes the interface more flexible
and, more importantly, will be needed to switch zram to per-cpu
compression streams.

Apart from that, this also aligns the zs_malloc() interface with
zpool/zbud.
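
For illustration (a sketch, not part of this patch; variable names are
hypothetical), a caller now chooses the GFP mask per allocation instead
of relying on the mask passed to zs_create_pool():

	/* before: gfp mask fixed at pool creation time */
	handle = zs_malloc(pool, clen);

	/* after: caller supplies the mask on every allocation */
	handle = zs_malloc(pool, clen, GFP_NOIO | __GFP_HIGHMEM);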

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@xxxxxxxxx>
---
drivers/block/zram/zram_drv.c | 2 +-
include/linux/zsmalloc.h | 2 +-
mm/zsmalloc.c | 15 ++++++---------
3 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 370c2f7..9030992 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -717,7 +717,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
src = uncmem;
}

- handle = zs_malloc(meta->mem_pool, clen);
+ handle = zs_malloc(meta->mem_pool, clen, GFP_NOIO | __GFP_HIGHMEM);
if (!handle) {
pr_err("Error allocating memory for compressed page: %u, size=%zu\n",
index, clen);
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 34eb160..6d89f8b 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -44,7 +44,7 @@ struct zs_pool;
struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
void zs_destroy_pool(struct zs_pool *pool);

-unsigned long zs_malloc(struct zs_pool *pool, size_t size);
+unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
void zs_free(struct zs_pool *pool, unsigned long obj);

void *zs_map_object(struct zs_pool *pool, unsigned long handle,
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index a0890e9..2c22aff 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -247,7 +247,6 @@ struct zs_pool {
struct size_class **size_class;
struct kmem_cache *handle_cachep;

- gfp_t flags; /* allocation flags used when growing pool */
atomic_long_t pages_allocated;

struct zs_pool_stats stats;
@@ -295,10 +294,10 @@ static void destroy_handle_cache(struct zs_pool *pool)
kmem_cache_destroy(pool->handle_cachep);
}

-static unsigned long alloc_handle(struct zs_pool *pool)
+static unsigned long alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
- pool->flags & ~__GFP_HIGHMEM);
+ gfp & ~__GFP_HIGHMEM);
}

static void free_handle(struct zs_pool *pool, unsigned long handle)
@@ -335,7 +334,7 @@ static void zs_zpool_destroy(void *pool)
static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
unsigned long *handle)
{
- *handle = zs_malloc(pool, size);
+ *handle = zs_malloc(pool, size, gfp);
return *handle ? 0 : -1;
}
static void zs_zpool_free(void *pool, unsigned long handle)
@@ -1391,7 +1390,7 @@ static unsigned long obj_malloc(struct size_class *class,
* otherwise 0.
* Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
*/
-unsigned long zs_malloc(struct zs_pool *pool, size_t size)
+unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
unsigned long handle, obj;
struct size_class *class;
@@ -1400,7 +1399,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
return 0;

- handle = alloc_handle(pool);
+ handle = alloc_handle(pool, gfp);
if (!handle)
return 0;

@@ -1413,7 +1412,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)

if (!first_page) {
spin_unlock(&class->lock);
- first_page = alloc_zspage(class, pool->flags);
+ first_page = alloc_zspage(class, gfp);
if (unlikely(!first_page)) {
free_handle(pool, handle);
return 0;
@@ -1945,8 +1944,6 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
prev_class = class;
}

- pool->flags = flags;
-
if (zs_pool_stat_create(pool, name))
goto err;

--
2.8.0