Re: [PATCH] mm: use pagevec to rotate reclaimable page

From: Hisashi Hifumi
Date: Tue Sep 18 2007 - 06:42:22 EST


I modified my patch based on your comment.

At 11:37 07/09/14, Andrew Morton wrote:
>
>So I do think that for safety and sanity's sake, we should be taking a ref
>on the pages when they are in a pagevec. That's going to hurt your nice
>performance numbers :(
>

I ran the ping test again to measure the performance degradation caused by taking a ref on the pages.

-2.6.23-rc6-with-modifiedpatch
--- testmachine ping statistics ---
3000 packets transmitted, 3000 received, 0% packet loss, time 53386ms
rtt min/avg/max/mdev = 0.074/0.110/4.716/0.147 ms, pipe 2, ipg/ewma 17.801/0.129 ms

For comparison, the result with my original patch was as follows.

-2.6.23-rc5-with-originalpatch
--- testmachine ping statistics ---
3000 packets transmitted, 3000 received, 0% packet loss, time 51924ms
rtt min/avg/max/mdev = 0.072/0.108/3.884/0.114 ms, pipe 2, ipg/ewma 17.314/0.091 ms


The impact on response time was small.
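
As a side note, the pattern the modified patch relies on can be shown with a small
userspace sketch (my own simplified illustration, not kernel code): each queued item
holds an extra reference while it sits in the batch, and that reference is dropped
when the batch is drained, which is what page_cache_get()/release_pages() do for the
per-CPU pagevec in the patch. The names and the batch size below are made up for the
example.

/*
 * Simplified userspace illustration of the batching-with-refcount pattern
 * (not kernel code): an item is pinned by a reference while it waits in the
 * batch, so it cannot be freed before the batch is drained.
 */
#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 14			/* small fixed batch, like a pagevec */

struct item {
	int refcount;
	int id;
};

static struct item *batch[BATCH_SIZE];
static int batch_count;

static void item_get(struct item *it)
{
	it->refcount++;
}

static void item_put(struct item *it)
{
	if (--it->refcount == 0)
		free(it);
}

/* Drain the batch: "rotate" every item, then drop the batch's references. */
static void batch_drain(void)
{
	int i;

	for (i = 0; i < batch_count; i++)
		printf("rotating item %d\n", batch[i]->id);
	for (i = 0; i < batch_count; i++)
		item_put(batch[i]);
	batch_count = 0;
}

/* Queue one item for rotation, draining when the batch fills up. */
static void rotate_item(struct item *it)
{
	item_get(it);			/* pin the item while it is batched */
	batch[batch_count++] = it;
	if (batch_count == BATCH_SIZE)
		batch_drain();
}

int main(void)
{
	int i;

	for (i = 0; i < 30; i++) {
		struct item *it = calloc(1, sizeof(*it));

		it->id = i;
		it->refcount = 1;	/* the caller's own reference */
		rotate_item(it);
		item_put(it);		/* caller drops its reference */
	}
	batch_drain();			/* flush whatever is left over */
	return 0;
}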

Thanks.

Signed-off-by: Hisashi Hifumi <hifumi.hisashi@xxxxxxxxxxxxx>


diff -Nrup linux-2.6.23-rc6.org/include/linux/swap.h linux-2.6.23-rc6/include/linux/swap.h
--- linux-2.6.23-rc6.org/include/linux/swap.h 2007-09-14 16:49:57.000000000 +0900
+++ linux-2.6.23-rc6/include/linux/swap.h 2007-09-14 16:58:48.000000000 +0900
@@ -185,6 +185,7 @@ extern void FASTCALL(mark_page_accessed(
extern void lru_add_drain(void);
extern int lru_add_drain_all(void);
extern int rotate_reclaimable_page(struct page *page);
+extern void move_tail_pages(void);
extern void swap_setup(void);

/* linux/mm/vmscan.c */
diff -Nrup linux-2.6.23-rc6.org/mm/swap.c linux-2.6.23-rc6/mm/swap.c
--- linux-2.6.23-rc6.org/mm/swap.c 2007-07-09 08:32:17.000000000 +0900
+++ linux-2.6.23-rc6/mm/swap.c 2007-09-18 18:49:07.000000000 +0900
@@ -94,24 +94,62 @@ void put_pages_list(struct list_head *pa
EXPORT_SYMBOL(put_pages_list);

/*
+ * pagevec_move_tail() must be called with IRQ disabled.
+ * Otherwise this may cause nasty races.
+ */
+static void pagevec_move_tail(struct pagevec *pvec)
+{
+	int i;
+	int pgmoved = 0;
+	struct zone *zone = NULL;
+	unsigned long flags = 0;
+
+	for (i = 0; i < pagevec_count(pvec); i++) {
+		struct page *page = pvec->pages[i];
+		struct zone *pagezone = page_zone(page);
+
+		if (pagezone != zone) {
+			if (zone)
+				spin_unlock_irqrestore(&zone->lru_lock, flags);
+			zone = pagezone;
+			spin_lock_irqsave(&zone->lru_lock, flags);
+		}
+		if (PageLRU(page) && !PageActive(page)) {
+			list_move_tail(&page->lru, &zone->inactive_list);
+			pgmoved++;
+		}
+	}
+	if (zone)
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	__count_vm_events(PGROTATED, pgmoved);
+	release_pages(pvec->pages, pvec->nr, pvec->cold);
+	pagevec_reinit(pvec);
+}
+
+static DEFINE_PER_CPU(struct pagevec, rotate_pvecs) = { 0, };
+
+void move_tail_pages(void)
+{
+	unsigned long flags;
+	struct pagevec *pvec;
+
+	local_irq_save(flags);
+	pvec = &__get_cpu_var(rotate_pvecs);
+	if (pagevec_count(pvec))
+		pagevec_move_tail(pvec);
+	local_irq_restore(flags);
+}
+
+/*
* Writeback is about to end against a page which has been marked for immediate
* reclaim. If it still appears to be reclaimable, move it to the tail of the
- * inactive list. The page still has PageWriteback set, which will pin it.
- *
- * We don't expect many pages to come through here, so don't bother batching
- * things up.
- *
- * To avoid placing the page at the tail of the LRU while PG_writeback is still
- * set, this function will clear PG_writeback before performing the page
- * motion. Do that inside the lru lock because once PG_writeback is cleared
- * we may not touch the page.
+ * inactive list.
*
* Returns zero if it cleared PG_writeback.
*/
int rotate_reclaimable_page(struct page *page)
{
-	struct zone *zone;
-	unsigned long flags;
+	struct pagevec *pvec;

	if (PageLocked(page))
		return 1;
@@ -122,15 +160,15 @@ int rotate_reclaimable_page(struct page
	if (!PageLRU(page))
		return 1;

-	zone = page_zone(page);
-	spin_lock_irqsave(&zone->lru_lock, flags);
	if (PageLRU(page) && !PageActive(page)) {
-		list_move_tail(&page->lru, &zone->inactive_list);
-		__count_vm_event(PGROTATED);
+		page_cache_get(page);
+		pvec = &__get_cpu_var(rotate_pvecs);
+		if (!pagevec_add(pvec, page))
+			pagevec_move_tail(pvec);
	}
	if (!test_clear_page_writeback(page))
		BUG();
-	spin_unlock_irqrestore(&zone->lru_lock, flags);
+
	return 0;
}

@@ -495,6 +533,23 @@ static int cpu_swap_callback(struct noti
	}
	return NOTIFY_OK;
}
+
+static int cpu_movetail_callback(struct notifier_block *nfb,
+				unsigned long action, void *hcpu)
+{
+	unsigned long flags;
+	struct pagevec *pvec;
+
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+		local_irq_save(flags);
+		pvec = &per_cpu(rotate_pvecs, (long)hcpu);
+		if (pagevec_count(pvec))
+			pagevec_move_tail(pvec);
+		local_irq_restore(flags);
+	}
+
+	return NOTIFY_OK;
+}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */

@@ -516,5 +571,6 @@ void __init swap_setup(void)
	 */
#ifdef CONFIG_HOTPLUG_CPU
	hotcpu_notifier(cpu_swap_callback, 0);
+	hotcpu_notifier(cpu_movetail_callback, 0);
#endif
}
diff -Nrup linux-2.6.23-rc6.org/mm/vmscan.c linux-2.6.23-rc6/mm/vmscan.c
--- linux-2.6.23-rc6.org/mm/vmscan.c 2007-09-14 16:49:57.000000000 +0900
+++ linux-2.6.23-rc6/mm/vmscan.c 2007-09-14 16:58:48.000000000 +0900
@@ -792,6 +792,7 @@ static unsigned long shrink_inactive_lis

	pagevec_init(&pvec, 1);

+	move_tail_pages();
	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	do {
