+diff -uprN linux-2.6.28/include/linux/mm_inline.h linux-2.6.28.new/include/linux/mm_inline.h
+--- linux-2.6.28/include/linux/mm_inline.h 2011-05-29 23:04:39.279792774 +0200
++++ linux-2.6.28.new/include/linux/mm_inline.h 2011-05-29 22:57:41.749553427 +0200
+@@ -24,13 +24,23 @@ static inline int page_is_file_cache(str
+ }
+
+ static inline void
+-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
++__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l, int tail)
+ {
+- list_add(&page->lru, &zone->lru[l].list);
++ /* See if this should be added to the tail of this lru list */
++ if (tail)
++ list_add_tail(&page->lru, &zone->lru[l].list);
++ else
++ list_add(&page->lru, &zone->lru[l].list);
+ __inc_zone_state(zone, NR_LRU_BASE + l);
+ }
+
+ static inline void
++add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
++{
++ __add_page_to_lru_list(zone, page, l, 0);
++}
++
++static inline void
+ del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
+ {
+ list_del(&page->lru);
+diff -uprN linux-2.6.28/include/linux/pagemap.h linux-2.6.28.new/include/linux/pagemap.h
+--- linux-2.6.28/include/linux/pagemap.h 2011-05-29 13:06:56.528715604 +0200
++++ linux-2.6.28.new/include/linux/pagemap.h 2011-05-29 13:15:14.226937454 +0200
+@@ -438,6 +438,8 @@ int add_to_page_cache_locked(struct page
+ pgoff_t index, gfp_t gfp_mask);
+ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+ pgoff_t index, gfp_t gfp_mask);
++int __add_to_page_cache_lru(struct page *page, struct address_space *mapping,
++ pgoff_t offset, gfp_t gfp_mask, int tail);
+ extern void remove_from_page_cache(struct page *page);
+ extern void __remove_from_page_cache(struct page *page);
+
+diff -uprN linux-2.6.28/include/linux/swap.h linux-2.6.28.new/include/linux/swap.h
+--- linux-2.6.28/include/linux/swap.h 2011-05-29 23:04:39.283792362 +0200
++++ linux-2.6.28.new/include/linux/swap.h 2011-05-29 13:14:05.101850740 +0200
+@@ -201,6 +201,7 @@ extern unsigned int nr_free_pagecache_pa
+
+
+ /* linux/mm/swap.c */
++extern void ____lru_cache_add(struct page *, enum lru_list lru, int tail);
+ extern void __lru_cache_add(struct page *, enum lru_list lru);
+ extern void lru_cache_add_lru(struct page *, enum lru_list lru);
+ extern void lru_cache_add_active_or_unevictable(struct page *,
+@@ -228,9 +229,14 @@ static inline void lru_cache_add_active_
+ __lru_cache_add(page, LRU_ACTIVE_ANON);
+ }
+
++static inline void lru_cache_add_file_tail(struct page *page, int tail)
++{
++ ____lru_cache_add(page, LRU_INACTIVE_FILE, tail);
++}
++
+ static inline void lru_cache_add_file(struct page *page)
+ {
+- __lru_cache_add(page, LRU_INACTIVE_FILE);
++ ____lru_cache_add(page, LRU_INACTIVE_FILE, 0);
+ }
+
+ static inline void lru_cache_add_active_file(struct page *page)
+diff -uprN linux-2.6.28/mm/filemap.c linux-2.6.28.new/mm/filemap.c
+--- linux-2.6.28/mm/filemap.c 2011-05-29 23:04:39.283792362 +0200
++++ linux-2.6.28.new/mm/filemap.c 2011-05-30 08:33:48.598237695 +0200
+@@ -492,8 +492,8 @@ out:
+ }
+ EXPORT_SYMBOL(add_to_page_cache_locked);
+
+-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+- pgoff_t offset, gfp_t gfp_mask)
++int __add_to_page_cache_lru(struct page *page, struct address_space *mapping,
++ pgoff_t offset, gfp_t gfp_mask, int tail)
+ {
+ int ret;
+
+@@ -509,13 +509,19 @@ int add_to_page_cache_lru(struct page *p
+ ret = add_to_page_cache(page, mapping, offset, gfp_mask);
+ if (ret == 0) {
+ if (page_is_file_cache(page))
+- lru_cache_add_file(page);
++ lru_cache_add_file_tail(page, tail);
+ else
+ lru_cache_add_active_anon(page);
+ }
+ return ret;
+ }
+
++int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
++ pgoff_t offset, gfp_t gfp_mask)
++{
++ return __add_to_page_cache_lru(page, mapping, offset, gfp_mask, 0);
++}
++
+ #ifdef CONFIG_NUMA
+ struct page *__page_cache_alloc(gfp_t gfp)
+ {
+diff -uprN linux-2.6.28/mm/readahead.c linux-2.6.28.new/mm/readahead.c
+--- linux-2.6.28/mm/readahead.c 2011-05-29 13:07:40.956272555 +0200
++++ linux-2.6.28.new/mm/readahead.c 2011-05-30 09:41:46.938330739 +0200
+@@ -16,6 +16,7 @@
+ #include <linux/task_io_accounting_ops.h>
+ #include <linux/pagevec.h>
+ #include <linux/pagemap.h>
++#include <linux/swap.h>
+
+ void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
+ {
+@@ -83,7 +84,7 @@ int read_cache_pages(struct address_spac
+ EXPORT_SYMBOL(read_cache_pages);
+
+ static int read_pages(struct address_space *mapping, struct file *filp,
+- struct list_head *pages, unsigned nr_pages)
++ struct list_head *pages, unsigned nr_pages, int tail)
+ {
+ unsigned page_idx;
+ int ret;
+@@ -98,8 +99,8 @@ static int read_pages(struct address_spa
+ for (page_idx = 0; page_idx < nr_pages; page_idx++) {
+ struct page *page = list_to_page(pages);
+ list_del(&page->lru);
+- if (!add_to_page_cache_lru(page, mapping,
+- page->index, GFP_KERNEL)) {
++ if (!__add_to_page_cache_lru(page, mapping,
++ page->index, GFP_KERNEL, tail)) {
+ mapping->a_ops->readpage(filp, page);
+ }
+ page_cache_release(page);
+@@ -109,6 +110,28 @@ out:
+ return ret;
+ }
+
++static inline int nr_mapped(void)
++{
++ return global_page_state(NR_FILE_MAPPED) +
++ global_page_state(NR_ANON_PAGES);
++}
++
++/*
++ * This examines how large in pages a file size is and returns 1 if it is
++ * more than half the unmapped RAM. Avoid calling global_page_state (which
++ * is expensive) unless we already know the file is likely large enough.
++ */
++static int large_isize(unsigned long nr_pages)
++{
++ if (nr_pages * 6 > vm_total_pages) {
++ unsigned long unmapped_ram = vm_total_pages - nr_mapped();
++
++ if (nr_pages * 2 > unmapped_ram)
++ return 1;
++ }
++ return 0;
++}
++
+ /*
+ * do_page_cache_readahead actually reads a chunk of disk. It allocates all
+ * the pages first, then submits them all for I/O. This avoids the very bad
+@@ -169,7 +192,8 @@ __do_page_cache_readahead(struct address
+ * will then handle the error.
+ */
+ if (ret)
+- read_pages(mapping, filp, &page_pool, ret);
++ read_pages(mapping, filp, &page_pool, ret,
++ large_isize(end_index));
+ BUG_ON(!list_empty(&page_pool));
+ out:
+ return ret;
+diff -uprN linux-2.6.28/mm/swap.c linux-2.6.28.new/mm/swap.c
+--- linux-2.6.28/mm/swap.c 2011-05-29 23:04:39.291791521 +0200
++++ linux-2.6.28.new/mm/swap.c 2011-05-29 23:04:00.407680115 +0200
+@@ -196,22 +196,33 @@ void mark_page_accessed(struct page *pag
+
+ EXPORT_SYMBOL(mark_page_accessed);
+
+-void __lru_cache_add(struct page *page, enum lru_list lru)
++void ______pagevec_lru_add(struct pagevec *pvec, enum lru_list lru, int tail);
++
++void ____lru_cache_add(struct page *page, enum lru_list lru, int tail)
+ {
+ struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
+
+ page_cache_get(page);
+ if (!pagevec_add(pvec, page))
+- ____pagevec_lru_add(pvec, lru);
++ ______pagevec_lru_add(pvec, lru, tail);
+ put_cpu_var(lru_add_pvecs);
+ }
+
++EXPORT_SYMBOL(____lru_cache_add);
++
++void __lru_cache_add(struct page *page, enum lru_list lru)
++{
++ ____lru_cache_add(page, lru, 0);
++}
++
++EXPORT_SYMBOL(__lru_cache_add);
++
+ /**
+ * lru_cache_add_lru - add a page to a page list
+ * @page: the page to be added to the LRU.
+ * @lru: the LRU list to which the page is added.
+ */
+-void lru_cache_add_lru(struct page *page, enum lru_list lru)
++void __lru_cache_add_lru(struct page *page, enum lru_list lru, int tail)
+ {
+ if (PageActive(page)) {
+ VM_BUG_ON(PageUnevictable(page));
+@@ -222,7 +233,12 @@ void lru_cache_add_lru(struct page *page
+ }
+
+ VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
+- __lru_cache_add(page, lru);
++ ____lru_cache_add(page, lru, tail);
++}
++
++void lru_cache_add_lru(struct page *page, enum lru_list lru)
++{
++ __lru_cache_add_lru(page, lru, 0);
+ }
+
+ /**
+@@ -423,7 +439,7 @@ void __pagevec_release_nonlru(struct pag
+ * Add the passed pages to the LRU, then drop the caller's refcount
+ * on them. Reinitialises the caller's pagevec.
+ */
+-void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
++void ______pagevec_lru_add(struct pagevec *pvec, enum lru_list lru, int tail)
+ {
+ int i;
+ struct zone *zone = NULL;
+@@ -450,7 +466,7 @@ void ____pagevec_lru_add(struct pagevec
+ SetPageActive(page);
+ zone->recent_rotated[file]++;
+ }
+- add_page_to_lru_list(zone, page, lru);
++ __add_page_to_lru_list(zone, page, lru, tail);
+ }
+ if (zone)
+ spin_unlock_irq(&zone->lru_lock);
+@@ -458,6 +474,11 @@ void ____pagevec_lru_add(struct pagevec
+ pagevec_reinit(pvec);
+ }
+
++void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
++{
++ ______pagevec_lru_add(pvec, lru, 0);
++}
++
+ EXPORT_SYMBOL(____pagevec_lru_add);
+
+ /*