Re: 2.1.76, memory fragmentation, BAD PATCH

kwrohrer@enteract.com
Sun, 4 Jan 1998 00:29:33 -0600 (CST)


And lo, kwrohrer@enteract.com saith unto me:
> What about a patch like the following? It puts DMA-capable pages last on
^^^^^^^^^^^^^^^^^^

The patch I just sent is not only intended for theoretical analysis, it's
also broken, and only the obvious error causes any compilation problems.

Rabid and fearless troubleshooters may wish to attempt the following patch
instead...I'll even try it myself momentarily.
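
In short, the idea is to queue DMA-capable pages at the tail of each free
list and everything else at the head, and then to let small non-DMA
allocations pull from the head while DMA (and larger) requests walk in from
the tail, so the DMA pages stay clumped at one end instead of getting
scattered.  A standalone sketch of that two-ended list discipline follows;
it is an illustration only, not kernel code, and fake_page, add_page and
get_page are invented names:

/* Two-ended free list sketch: a circular doubly-linked list with a
 * sentinel.  Non-DMA pages go in at the head, DMA-capable pages at the
 * tail; ordinary requests take the head page, DMA requests scan back
 * from the tail.  Invented names, for illustration only.
 */
#include <stdio.h>

struct fake_page {
        struct fake_page *next, *prev;
        int dma;                /* stands in for PageDMA()   */
        int nr;                 /* page number, for the demo */
};

static struct fake_page head = { &head, &head, 0, -1 };  /* sentinel */

static void add_page(struct fake_page *entry)
{
        if (!entry->dma) {                      /* ordinary page: head */
                entry->prev = &head;
                entry->next = head.next;
                head.next->prev = entry;
                head.next = entry;
        } else {                                /* DMA page: tail */
                entry->next = &head;
                entry->prev = head.prev;
                head.prev->next = entry;
                head.prev = entry;
        }
}

static struct fake_page *get_page(int want_dma)
{
        struct fake_page *p;

        if (!want_dma) {
                /* like the kernel, a non-DMA request may still end up
                 * with a DMA-capable page if nothing else is left */
                p = head.next;
                if (p == &head)
                        return NULL;            /* list empty */
        } else {
                /* DMA pages collect at the tail, so walk backwards */
                for (p = head.prev; p != &head; p = p->prev)
                        if (p->dma)
                                break;
                if (p == &head)
                        return NULL;            /* no DMA page left */
        }
        p->prev->next = p->next;                /* unlink */
        p->next->prev = p->prev;
        return p;
}

int main(void)
{
        static struct fake_page pages[6];
        int i;

        for (i = 0; i < 6; i++) {
                pages[i].nr = i;
                pages[i].dma = (i & 1);         /* odd pages are "DMA" */
                add_page(&pages[i]);
        }
        printf("non-DMA request -> page %d\n", get_page(0)->nr);
        printf("DMA request     -> page %d\n", get_page(1)->nr);
        return 0;
}

In terms of the patch below, add_page() plays the part of the new
add_mem_queue(), and the two branches of get_page() correspond to the
order<2 && !dma fast path and the backwards prev-walk in the rewritten
RMQUEUE.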

Keith

*** mm/page_alloc.c.old Thu Jan 1 17:30:55 1998
--- mm/page_alloc.c Sun Jan 4 00:26:26 1998
***************
*** 62,75 ****
          head->prev = memory_head(head);
  }

  static inline void add_mem_queue(struct free_area_struct * head, struct page * entry)
  {
!         struct page * next = head->next;

!         entry->prev = memory_head(head);
!         entry->next = next;
!         next->prev = entry;
!         head->next = entry;
  }

  static inline void remove_mem_queue(struct page * entry)
--- 62,87 ----
          head->prev = memory_head(head);
  }

+
+ /* NOTE: this should be #ifdefed to make sense on all architectures */
+ #define KeepTogether(entry) PageDMA(entry)
  static inline void add_mem_queue(struct free_area_struct * head, struct page * entry)
  {
!         if (!KeepTogether(entry)) {
!                 struct page * next = head->next;

!                 entry->prev = memory_head(head);
!                 entry->next = next;
!                 next->prev = entry;
!                 head->next = entry;
!         } else {
!                 struct page * prev = head->prev;
!
!                 entry->next = memory_head(head);
!                 entry->prev = prev;
!                 prev->next = entry;
!                 head->prev = entry;
!         }
  }

  static inline void remove_mem_queue(struct page * entry)
***************
*** 161,176 ****
          change_bit((index) >> (1+(order)), (area)->map)
  #define CAN_DMA(x) (PageDMA(x))
  #define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT))
! #define RMQUEUE(order, maxorder, dma) \
! do { struct free_area_struct * area = free_area+order; \
!      unsigned long new_order = order; \
          do { struct page *prev = memory_head(area), *ret = prev->next; \
                  while (memory_head(area) != ret) { \
                          if (new_order >= maxorder && ret->next == prev) \
                                  break; \
                          if (!dma || CAN_DMA(ret)) { \
                                  unsigned long map_nr = ret->map_nr; \
!                                 (prev->next = ret->next)->prev = prev; \
                                  MARK_USED(map_nr, new_order, area); \
                                  nr_free_pages -= 1 << order; \
                                  EXPAND(ret, map_nr, order, new_order, area); \
--- 173,206 ----
          change_bit((index) >> (1+(order)), (area)->map)
  #define CAN_DMA(x) (PageDMA(x))
  #define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT))
! #define RMQUEUE(order, maxorder, dma) { \
! if (order<2 && !dma) { \
! struct free_area_struct * area = free_area+order; \
! unsigned long new_order = order; \
          do { struct page *prev = memory_head(area), *ret = prev->next; \
                  while (memory_head(area) != ret) { \
+                         unsigned long map_nr = ret->map_nr; \
+                         if (new_order >= maxorder && ret->next == prev) \
+                                 break; \
+                         (prev->next = ret->next)->prev = prev; \
+                         MARK_USED(map_nr, new_order, area); \
+                         nr_free_pages -= 1 << order; \
+                         EXPAND(ret, map_nr, order, new_order, area); \
+                         spin_unlock_irqrestore(&page_alloc_lock, flags); \
+                         return ADDRESS(map_nr); \
+                 } \
+                 new_order++; area++; \
+         } while (new_order < NR_MEM_LISTS); \
+ } else { \
+ struct free_area_struct * area = free_area+order; \
+ unsigned long new_order = order; \
+         do { struct page *prev = memory_head(area), *ret = prev->prev; \
+                 while (memory_head(area) != ret) { \
                          if (new_order >= maxorder && ret->next == prev) \
                                  break; \
                          if (!dma || CAN_DMA(ret)) { \
                                  unsigned long map_nr = ret->map_nr; \
!                                 (prev->prev = ret->prev)->next = prev; \
                                  MARK_USED(map_nr, new_order, area); \
                                  nr_free_pages -= 1 << order; \
                                  EXPAND(ret, map_nr, order, new_order, area); \
***************
*** 178,188 ****
                                  return ADDRESS(map_nr); \
                          } \
                          prev = ret; \
!                         ret = ret->next; \
                  } \
                  new_order++; area++; \
          } while (new_order < NR_MEM_LISTS); \
! } while (0)

  #define EXPAND(map,index,low,high,area) \
  do { unsigned long size = 1 << high; \
--- 208,219 ----
                                  return ADDRESS(map_nr); \
                          } \
                          prev = ret; \
!                         ret = ret->prev; \
                  } \
                  new_order++; area++; \
          } while (new_order < NR_MEM_LISTS); \
! } }
!

  #define EXPAND(map,index,low,high,area) \
  do { unsigned long size = 1 << high; \