[PATCH] mm: cma: convert cma->lock into a spinlock

From: Roman Gushchin
Date: Thu Apr 02 2020 - 15:24:13 EST


Currently, cma->lock is a mutex that protects cma->bitmap.
cma_release() grabs this mutex in cma_clear_bitmap().

This means that cma_release() can't be called from atomic context,
which is inconvenient for a generic memory release function.
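
For illustration only (hypothetical code, not from the kernel tree):
a driver that wants to return a CMA area while holding one of its own
spinlocks, i.e. from atomic context, cannot legally do so as long as
cma_release() may sleep. "struct foo_dev" and foo_free_dma_buffer()
are made-up names:

#include <linux/cma.h>
#include <linux/spinlock.h>

/* Hypothetical driver state, used only for this example. */
struct foo_dev {
	spinlock_t lock;
	struct cma *cma;
	struct page *pages;
	unsigned int nr_pages;
};

static void foo_free_dma_buffer(struct foo_dev *foo)
{
	unsigned long flags;

	spin_lock_irqsave(&foo->lock, flags);	/* atomic context from here */
	/* Illegal while cma->lock is a mutex; fine once it's a spinlock. */
	cma_release(foo->cma, foo->pages, foo->nr_pages);
	spin_unlock_irqrestore(&foo->lock, flags);
}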

There are two options for solving this problem:
1) introduce some form of delayed deallocation
2) convert the mutex into a spinlock

This patch implements the second approach. Bitmap operations cannot
sleep and should be relatively fast, so there is no reason a spinlock
can't provide the required synchronization.
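
The resulting locking discipline, sketched below for the allocation
path, keeps the spinlock held only across the non-sleeping bitmap
search/update; anything that may sleep (e.g. the page migration done
by alloc_contig_range()) runs after the lock has been dropped.
cma_reserve_bits() is a made-up helper name used purely for
illustration, not something added by this patch:

static unsigned long cma_reserve_bits(struct cma *cma, unsigned long start,
				      unsigned long bitmap_count,
				      unsigned long bitmap_maxno)
{
	unsigned long bitmap_no;

	spin_lock(&cma->lock);
	bitmap_no = bitmap_find_next_zero_area(cma->bitmap, bitmap_maxno,
					       start, bitmap_count, 0);
	if (bitmap_no < bitmap_maxno)
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock(&cma->lock);	/* dropped before any sleeping work */

	return bitmap_no;
}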

Signed-off-by: Roman Gushchin <guro@xxxxxx>
---
mm/cma.c | 21 ++++++++++++---------
mm/cma.h | 2 +-
2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/mm/cma.c b/mm/cma.c
index be55d1988c67..cb4a3e0a9eeb 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -88,9 +88,9 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
 	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

-	mutex_lock(&cma->lock);
+	spin_lock(&cma->lock);
 	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
-	mutex_unlock(&cma->lock);
+	spin_unlock(&cma->lock);
 }

 static int __init cma_activate_area(struct cma *cma)
@@ -126,7 +126,7 @@ static int __init cma_activate_area(struct cma *cma)
 		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
 	} while (--i);

-	mutex_init(&cma->lock);
+	spin_lock_init(&cma->lock);

 #ifdef CONFIG_CMA_DEBUGFS
 	INIT_HLIST_HEAD(&cma->mem_head);
@@ -381,22 +381,25 @@ static void cma_debug_show_areas(struct cma *cma)
 	unsigned long nr_part, nr_total = 0;
 	unsigned long nbits = cma_bitmap_maxno(cma);

-	mutex_lock(&cma->lock);
 	pr_info("number of available pages: ");
 	for (;;) {
+		spin_lock(&cma->lock);
 		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
-		if (next_zero_bit >= nbits)
+		if (next_zero_bit >= nbits) {
+			spin_unlock(&cma->lock);
 			break;
+		}
 		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
 		nr_zero = next_set_bit - next_zero_bit;
 		nr_part = nr_zero << cma->order_per_bit;
+		spin_unlock(&cma->lock);
+
 		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
 			next_zero_bit);
 		nr_total += nr_part;
 		start = next_zero_bit + nr_zero;
 	}
 	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
-	mutex_unlock(&cma->lock);
 }
 #else
 static inline void cma_debug_show_areas(struct cma *cma) { }
@@ -441,12 +444,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		return NULL;

 	for (;;) {
-		mutex_lock(&cma->lock);
+		spin_lock(&cma->lock);
 		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
 				bitmap_maxno, start, bitmap_count, mask,
 				offset);
 		if (bitmap_no >= bitmap_maxno) {
-			mutex_unlock(&cma->lock);
+			spin_unlock(&cma->lock);
 			break;
 		}
 		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
@@ -455,7 +458,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		 * our exclusive use. If the migration fails we will take the
 		 * lock again and unmark it.
 		 */
-		mutex_unlock(&cma->lock);
+		spin_unlock(&cma->lock);

 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 		mutex_lock(&cma_mutex);
diff --git a/mm/cma.h b/mm/cma.h
index 33c0b517733c..7f5985b11439 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -7,7 +7,7 @@ struct cma {
 	unsigned long count;
 	unsigned long *bitmap;
 	unsigned int order_per_bit; /* Order of pages represented by one bit */
-	struct mutex lock;
+	spinlock_t lock;
 #ifdef CONFIG_CMA_DEBUGFS
 	struct hlist_head mem_head;
 	spinlock_t mem_head_lock;
--
2.25.1