Pagecache size limitation

Larry Woodman (woodman@missioncriticallinux.com)
Thu, 18 Nov 1999 16:00:26 -0500


Here is a patch for 2.3.28 that enforces the max-percent limit (the
third value in /proc/sys/vm/pagecache). It helps prevent the system
from pausing while large files are being read or written: when the
page cache grows beyond max_percent of physical memory,
page_cache_alloc() now reclaims the excess before allocating a new page.
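
For reference, here is a minimal user-space sketch (mine, not part of
the patch) showing how the /proc/sys/vm/pagecache triple of
min_percent, borrow_percent and max_percent can be read and tuned.
The 40 written below is purely illustrative:

#include <stdio.h>

int main(void)
{
        int min_percent, borrow_percent, max_percent;
        FILE *f = fopen("/proc/sys/vm/pagecache", "r");

        if (!f) {
                perror("/proc/sys/vm/pagecache");
                return 1;
        }
        if (fscanf(f, "%d %d %d", &min_percent, &borrow_percent,
                   &max_percent) != 3) {
                fclose(f);
                fprintf(stderr, "unexpected format\n");
                return 1;
        }
        fclose(f);
        printf("min=%d%% borrow=%d%% max=%d%%\n",
               min_percent, borrow_percent, max_percent);

        /* lower the ceiling the patch enforces; 40 is illustrative */
        f = fopen("/proc/sys/vm/pagecache", "w");
        if (!f) {
                perror("/proc/sys/vm/pagecache");
                return 1;
        }
        fprintf(f, "%d %d %d\n", min_percent, borrow_percent, 40);
        fclose(f);
        return 0;
}

Since the patched page_cache_alloc() checks the limit on every
allocation, a new max_percent written this way takes effect right away.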

Larry Woodman
http://www.missioncriticallinux.com

[Attachment: patch-2.3.28.pagecache]

diff --recursive linux.van/include/linux/mm.h linux.pc/include/linux/mm.h
16a17
> extern atomic_t swap_cache_pages;
443a445,450
> #define pagecache_over_max() ((((unsigned long)atomic_read(&page_cache_size) - \
>                 atomic_read(&swap_cache_pages)) <= \
>                 (page_cache.max_percent * num_physpages / 100)) ? \
>                 0 : (((unsigned long)atomic_read(&page_cache_size) - \
>                 atomic_read(&swap_cache_pages)) - \
>                 (page_cache.max_percent * num_physpages / 100)))
diff --recursive linux.van/include/linux/pagemap.h linux.pc/include/linux/pagemap.h
30c30
< #define page_cache_alloc() alloc_pages(GFP_HIGHUSER, 0)
---
> extern struct page * page_cache_alloc(void);
diff --recursive linux.van/mm/filemap.c linux.pc/mm/filemap.c
362a363,462
> /* Try to shrink the pagecache */
> void shrink_pagecache(long over)
> {
>         int count;
>         LIST_HEAD(young);
>         LIST_HEAD(old);
>         struct list_head *page_lru = NULL;
>         struct page * page;
> 
>         spin_lock(&pagemap_lru_lock);
>         count = nr_lru_pages;
> 
>         while ((count-- > 1) && (over > 0) && (page_lru = lru_cache.prev) != &lru_cache) {
>                 page = list_entry(page_lru, struct page, lru);
>                 list_del(page_lru);
> 
>                 /* if the page was referenced, put it back at the young end */
>                 if (test_and_clear_bit(PG_referenced, &page->flags)) {
>                         list_add(page_lru, &lru_cache);
>                         continue;
>                 }
>                 spin_unlock(&pagemap_lru_lock);
> 
>                 /* don't bother with DMA-able or high-memory pages here */
>                 if (PageDMA(page) || PageHighMem(page)) {
>                         list_add(page_lru, &old);
>                         spin_lock(&pagemap_lru_lock);
>                         continue;
>                 }
> 
>                 /* skip swap-cache and currently locked pages this time */
>                 if (PageSwapCache(page) || TryLockPage(page)) {
>                         list_add(page_lru, &young);
>                         spin_lock(&pagemap_lru_lock);
>                         continue;
>                 }
> 
>                 /* the pagecache_lock protects the mapping/buffers checks */
>                 spin_lock(&pagecache_lock);
>                 get_page(page);
> 
>                 /* freeable if mapping && count == 2, or mapping && buffers && count == 3 */
>                 if (page->mapping && (page_count(page)==2 ||
>                                      (page->buffers && page_count(page)==3))) {
>                         if (page->buffers) {
>                                 spin_unlock(&pagecache_lock);
>                                 if (!try_to_free_buffers(page)) {
>                                         list_add(page_lru, &old);
>                                         UnlockPage(page);
>                                         put_page(page);
>                                         spin_lock(&pagemap_lru_lock);
>                                         continue;
>                                 } else
>                                         spin_lock(&pagecache_lock);
>                         }
>                         if (page->mapping) {
>                                 remove_page_from_inode_queue(page);
>                                 remove_page_from_hash_queue(page);
>                                 page->mapping = NULL;
>                                 spin_unlock(&pagecache_lock);
>                                 UnlockPage(page);
>                                 put_page(page);
>                                 page_cache_release(page);
>                                 over--;
>                                 spin_lock(&pagemap_lru_lock);
>                                 nr_lru_pages--;
>                         } else {
>                                 /* the page left the cache while its
>                                    buffers were being freed; requeue it */
>                                 spin_unlock(&pagecache_lock);
>                                 list_add(page_lru, &young);
>                                 UnlockPage(page);
>                                 put_page(page);
>                                 spin_lock(&pagemap_lru_lock);
>                         }
>                 } else {
>                         spin_unlock(&pagecache_lock);
>                         list_add(page_lru, &young);
>                         UnlockPage(page);
>                         put_page(page);
>                         spin_lock(&pagemap_lru_lock);
>                 }
>         }
> 
>         /* put the young pages on the head and the old pages on the tail */
>         list_splice(&young, &lru_cache);
>         list_splice(&old, lru_cache.prev);
>         spin_unlock(&pagemap_lru_lock);
>         return;
> }
>  
> struct page * page_cache_alloc(void)
> {
>         unsigned long over;
> 
>         if ((over = pagecache_over_max()) > 0)
>                 shrink_pagecache(over);
>         return alloc_pages(GFP_HIGHUSER, 0);
> }
> 
diff --recursive linux.van/mm/swap_state.c linux.pc/mm/swap_state.c
27a28,29
> atomic_t swap_cache_pages = ATOMIC_INIT(0);
> 
52a55
> 	atomic_inc(&swap_cache_pages);
152a156
> 	atomic_dec(&swap_cache_pages);

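To make the threshold arithmetic concrete, here is a standalone
sketch (again mine, not part of the patch) of what the
pagecache_over_max() macro computes: the number of page-cache pages,
net of swap-cache pages, that exceed max_percent of physical memory.
All numbers are invented for the example:

#include <stdio.h>

/* same formula as the patch's pagecache_over_max() macro */
static unsigned long over_max(unsigned long page_cache_size,
                              unsigned long swap_cache_pages,
                              unsigned long num_physpages,
                              unsigned long max_percent)
{
        unsigned long cache = page_cache_size - swap_cache_pages;
        unsigned long limit = max_percent * num_physpages / 100;

        return (cache <= limit) ? 0 : cache - limit;
}

int main(void)
{
        /* 32768 physical 4KB pages (128MB); 19660 page-cache pages,
           1000 of them swap cache; max_percent of 40 */
        unsigned long over = over_max(19660, 1000, 32768, 40);

        /* 18660 cache pages against a 13107-page limit: prints 5553 */
        printf("pages over limit: %lu\n", over);
        return 0;
}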