[PATCH 4/4] mm/compaction: add compact_unlock_irqrestore to remove repeated code

From: Kemeng Shi
Date: Tue Jul 18 2023 - 23:32:00 EST


Add compact_unlock_irqrestore to remove repeated code. This also makes
the compact lock function series complete, as we can call
compact_lock_irqsave/compact_unlock_irqrestore in pairs.

Signed-off-by: Kemeng Shi <shikemeng@xxxxxxxxxxxxxxx>
---
mm/compaction.c | 43 ++++++++++++++++---------------------------
1 file changed, 16 insertions(+), 27 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index c1dc821ac6e1..eb1d3d9a422c 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -541,6 +541,14 @@ static spinlock_t *compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
return lock;
}

+static inline void compact_unlock_irqrestore(spinlock_t **locked, unsigned long flags)
+{
+ if (*locked) {
+ spin_unlock_irqrestore(*locked, flags);
+ *locked = NULL;
+ }
+}
+
/*
* Compaction requires the taking of some coarse locks that are potentially
* very heavily contended. The lock should be periodically unlocked to avoid
@@ -556,10 +564,7 @@ static spinlock_t *compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
static bool compact_unlock_should_abort(spinlock_t **locked,
unsigned long flags, struct compact_control *cc)
{
- if (*locked) {
- spin_unlock_irqrestore(*locked, flags);
- *locked = NULL;
- }
+ compact_unlock_irqrestore(locked, flags);

if (fatal_signal_pending(current)) {
cc->contended = true;
@@ -671,8 +676,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,

}

- if (locked)
- spin_unlock_irqrestore(locked, flags);
+ compact_unlock_irqrestore(&locked, flags);

/*
* There is a tiny chance that we have read bogus compound_order(),
@@ -935,10 +939,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
}

if (PageHuge(page) && cc->alloc_contig) {
- if (locked) {
- spin_unlock_irqrestore(locked, flags);
- locked = NULL;
- }
+ compact_unlock_irqrestore(&locked, flags);

ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);

@@ -1024,10 +1025,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
*/
if (unlikely(__PageMovable(page)) &&
!PageIsolated(page)) {
- if (locked) {
- spin_unlock_irqrestore(locked, flags);
- locked = NULL;
- }
+ compact_unlock_irqrestore(&locked, flags);

if (isolate_movable_page(page, mode)) {
folio = page_folio(page);
@@ -1111,9 +1109,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,

/* If we already hold the lock, we can skip some rechecking */
if (&lruvec->lru_lock != locked) {
- if (locked)
- spin_unlock_irqrestore(locked, flags);
-
+ compact_unlock_irqrestore(&locked, flags);
locked = compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);

lruvec_memcg_debug(lruvec, folio);
@@ -1176,10 +1172,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,

isolate_fail_put:
/* Avoid potential deadlock in freeing page under lru_lock */
- if (locked) {
- spin_unlock_irqrestore(locked, flags);
- locked = NULL;
- }
+ compact_unlock_irqrestore(&locked, flags);
folio_put(folio);

isolate_fail:
@@ -1192,10 +1185,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* page anyway.
*/
if (nr_isolated) {
- if (locked) {
- spin_unlock_irqrestore(locked, flags);
- locked = NULL;
- }
+ compact_unlock_irqrestore(&locked, flags);
putback_movable_pages(&cc->migratepages);
cc->nr_migratepages = 0;
nr_isolated = 0;
@@ -1224,8 +1214,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
folio = NULL;

isolate_abort:
- if (locked)
- spin_unlock_irqrestore(locked, flags);
+ compact_unlock_irqrestore(&locked, flags);
if (folio) {
folio_set_lru(folio);
folio_put(folio);
--
2.30.0