[Suggestion] Simple memory defragmentation

Dr. Werner Fink (werner@suse.de)
Thu, 15 Jan 1998 20:59:27 +0100


Hi,

the appended patch (UNTESTED!) tries to address the memory
fragmentation problem. The idea is simple: whenever kswapd
frees a page in shrink_mmap, keep scanning for further
free-able pages immediately following it. This should reduce
the randomness of free-page placement and thereby increase
the number of contiguous free pages. One disadvantage remains:
kswapd shrinks the page cache more than it would without this
approach.
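
In toy form the scan looks like this (a standalone userspace
model, NOT the kernel code: the real shrink_mmap keeps its
clock hand in a static variable across calls, honours the
count_max/count_min scan budgets, and only takes the early
continue while kswapd is awake; page_free[]/can_free[] here
stand in for the new PG_is_free bit and for the inode/buffer
cache cases):

#include <stdio.h>

#define ORDER_LVL 4	/* stop once a run this long has built up */
#define NPAGES 16

static int page_free[NPAGES] = { 0,1,0,0, 1,0,1,0, 0,0,0,0, 1,1,0,0 };
static int can_free[NPAGES]  = { 1,0,1,0, 0,1,0,1, 1,1,0,0, 0,0,1,1 };

static int shrink_sketch(void)
{
	int prev_free = 0;	/* free pages directly before the clock hand */
	int clock;

	for (clock = 0; clock < NPAGES; clock++) {
		if (page_free[clock]) {
			prev_free++;		/* the run grows */
			continue;
		}
		if (can_free[clock]) {
			page_free[clock] = 1;	/* "free" the page */
			printf("freed page %d, run before it was %d\n",
			       clock, prev_free);
			/* Keep scanning while the run before this page
			 * is nonzero but shorter than ORDER_LVL, so the
			 * freed page can grow into a contiguous block. */
			if (prev_free && prev_free < ORDER_LVL) {
				prev_free++;
				continue;
			}
			return 1;		/* report one freed page */
		}
		prev_free = 0;			/* busy page breaks the run */
	}
	return 0;
}

int main(void)
{
	while (shrink_sketch())
		;	/* keep calling, as kswapd keeps calling shrink_mmap */
	return 0;
}

Without the early continue, every call would return after the
first freed page and the freed pages would stay scattered; with
it, a freed page that borders other free pages is used to extend
that run toward ORDER_LVL before the scan reports success.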

Werner

----------------------------------------------------------------
diff -ur linux-2.1.79/include/linux/mm.h linux/include/linux/mm.h
--- linux-2.1.79/include/linux/mm.h Tue Jan 13 01:43:16 1998
+++ linux/include/linux/mm.h Thu Jan 15 20:04:28 1998
@@ -137,6 +137,7 @@
#define PG_DMA 7
#define PG_Slab 8
#define PG_swap_cache 9
+#define PG_is_free 10
#define PG_reserved 31

/* Make it prettier to test the above... */
@@ -151,7 +152,9 @@
#define PageSlab(page) (test_bit(PG_Slab, &(page)->flags))
#define PageSwapCache(page) (test_bit(PG_swap_cache, &(page)->flags))
#define PageReserved(page) (test_bit(PG_reserved, &(page)->flags))
+#define PageIsFree(page) (test_bit(PG_is_free, &(page)->flags))

+#define PageSetFree(page) (set_bit(PG_is_free, &(page)->flags))
#define PageSetSlab(page) (set_bit(PG_Slab, &(page)->flags))
#define PageSetSwapCache(page) (set_bit(PG_swap_cache, &(page)->flags))
#define PageTestandSetSwapCache(page) \
@@ -159,6 +162,7 @@

#define PageClearSlab(page) (clear_bit(PG_Slab, &(page)->flags))
#define PageClearSwapCache(page) (clear_bit(PG_swap_cache, &(page)->flags))
+#define PageClearFree(page) (clear_bit(PG_is_free, &(page)->flags))

#define PageTestandClearSwapCache(page) \
(test_and_clear_bit(PG_swap_cache, &(page)->flags))
diff -ur linux-2.1.79/include/linux/pagemap.h linux/include/linux/pagemap.h
--- linux-2.1.79/include/linux/pagemap.h Tue Jan 13 01:43:16 1998
+++ linux/include/linux/pagemap.h Wed Jan 14 20:25:06 1998
@@ -11,6 +11,7 @@

#include <linux/mm.h>
#include <linux/fs.h>
+#include <linux/swapctl.h>

static inline unsigned long page_address(struct page * page)
{
@@ -20,8 +21,6 @@
#define PAGE_HASH_BITS 11
#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)

-#define PAGE_AGE_VALUE 16
-
extern unsigned long page_cache_size; /* # of pages currently in the hash table */
extern struct page * page_hash_table[PAGE_HASH_SIZE];

@@ -84,7 +83,7 @@
{
page_cache_size++;
set_bit(PG_referenced, &page->flags);
- page->age = PAGE_AGE_VALUE;
+ touch_page(page);
if((page->next_hash = *p) != NULL)
(*p)->pprev_hash = &page->next_hash;
*p = page;
diff -ur linux-2.1.79/mm/filemap.c linux/mm/filemap.c
--- linux-2.1.79/mm/filemap.c Mon Jan 12 23:33:20 1998
+++ linux/mm/filemap.c Thu Jan 15 19:38:38 1998
@@ -31,6 +31,11 @@
#include <asm/uaccess.h>

/*
+ * vmscan.c: Set if kswapd is running.
+ */
+extern int kswapd_awake;
+
+/*
* Shared mappings implemented 30.11.1994. It's not fully working yet,
* though.
*
@@ -115,18 +120,22 @@
}
}

+#define ORDER_LVL 4 /* stop once a run of this many contiguous free pages builds up */
+
int shrink_mmap(int priority, int dma)
{
static unsigned long clock = 0;
struct page * page;
unsigned long limit = num_physpages;
struct buffer_head *tmp, *bh;
- int count_max, count_min;
+ int count_max, count_min, prev_free = 0;

count_max = (limit<<1) >> (priority>>1);
count_min = (limit<<1) >> (priority);

page = mem_map + clock;
+ if (PageIsFree(page))
+ prev_free++;
do {
count_max--;
if (page->inode || page->buffers)
@@ -169,12 +178,17 @@
remove_page_from_hash_queue(page);
remove_page_from_inode_queue(page);
__free_page(page);
+ if (kswapd_awake && prev_free && prev_free < ORDER_LVL)
+ goto next;
return 1;
}

/* is it a buffer cache page? */
- if (bh && try_to_free_buffer(bh, &bh, 6))
+ if (bh && try_to_free_buffer(bh, &bh, 6)) {
+ if (kswapd_awake && prev_free && prev_free < ORDER_LVL)
+ goto next;
return 1;
+ }
break;

default:
@@ -185,6 +199,10 @@
/* nothing */
}
next:
+ if (PageIsFree(page))
+ prev_free++;
+ else
+ prev_free = 0;
page++;
clock++;
if (clock >= limit) {
diff -ur linux-2.1.79/mm/page_alloc.c linux/mm/page_alloc.c
--- linux-2.1.79/mm/page_alloc.c Mon Jan 12 23:33:20 1998
+++ linux/mm/page_alloc.c Thu Jan 15 20:25:29 1998
@@ -135,6 +135,7 @@
if (!PageReserved(page) && atomic_dec_and_test(&page->count)) {
if (PageSwapCache(page))
panic ("Freeing swap cache page");
+ PageSetFree(page);
free_pages_ok(page->map_nr, 0);
}
if (PageSwapCache(page) && atomic_read(&page->count) == 1)
@@ -152,6 +153,7 @@
if (atomic_dec_and_test(&map->count)) {
if (PageSwapCache(map))
panic ("Freeing swap cache pages");
+ PageSetFree(map);
free_pages_ok(map_nr, order);
return;
}
@@ -182,6 +184,7 @@
nr_free_pages -= 1 << order; \
EXPAND(ret, map_nr, order, new_order, area); \
spin_unlock_irqrestore(&page_alloc_lock, flags); \
+ PageClearFree(ret); \
return ADDRESS(map_nr); \
} \
prev = ret; \
@@ -334,7 +337,8 @@
unsigned long page = __get_free_page(GFP_KERNEL);

if (pte_val(*page_table) != entry) {
- free_page(page);
+ if (page)
+ free_page(page);
return;
}
if (!page) {
diff -ur linux-2.1.79/mm/vmscan.c linux/mm/vmscan.c
--- linux-2.1.79/mm/vmscan.c Mon Jan 12 23:33:20 1998
+++ linux/mm/vmscan.c Thu Jan 15 19:36:08 1998
@@ -45,7 +45,7 @@
/*
* We avoid doing a reschedule if the pageout daemon is already awake;
*/
-static int kswapd_awake = 0;
+int kswapd_awake = 0;

static void init_swap_timer(void);