1 diff -uprN linux-2.6.28/include/linux/mm_inline.h linux-2.6.28.new/include/linux/mm_inline.h
2 --- linux-2.6.28/include/linux/mm_inline.h 2011-05-29 23:04:39.279792774 +0200
3 +++ linux-2.6.28.new/include/linux/mm_inline.h 2011-05-29 22:57:41.749553427 +0200
4 @@ -24,13 +24,23 @@ static inline int page_is_file_cache(str
8 -add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
9 +__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l, int tail)
11 - list_add(&page->lru, &zone->lru[l].list);
12 + /* See if this should be added to the tail of this lru list */
14 + list_add_tail(&page->lru, &zone->lru[l].list);
16 + list_add(&page->lru, &zone->lru[l].list);
17 __inc_zone_state(zone, NR_LRU_BASE + l);
21 +add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
23 + __add_page_to_lru_list(zone, page, l, 0);
27 del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
30 diff -uprN linux-2.6.28/include/linux/pagemap.h linux-2.6.28.new/include/linux/pagemap.h
31 --- linux-2.6.28/include/linux/pagemap.h 2011-05-29 13:06:56.528715604 +0200
32 +++ linux-2.6.28.new/include/linux/pagemap.h 2011-05-29 13:15:14.226937454 +0200
33 @@ -438,6 +438,8 @@ int add_to_page_cache_locked(struct page
34 pgoff_t index, gfp_t gfp_mask);
35 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
36 pgoff_t index, gfp_t gfp_mask);
37 +int __add_to_page_cache_lru(struct page *page, struct address_space *mapping,
38 + pgoff_t offset, gfp_t gfp_mask, int tail);
39 extern void remove_from_page_cache(struct page *page);
40 extern void __remove_from_page_cache(struct page *page);
42 diff -uprN linux-2.6.28/include/linux/swap.h linux-2.6.28.new/include/linux/swap.h
43 --- linux-2.6.28/include/linux/swap.h 2011-05-29 23:04:39.283792362 +0200
44 +++ linux-2.6.28.new/include/linux/swap.h 2011-05-29 13:14:05.101850740 +0200
45 @@ -201,6 +201,7 @@ extern unsigned int nr_free_pagecache_pa
49 +extern void ____lru_cache_add(struct page *, enum lru_list lru, int tail);
50 extern void __lru_cache_add(struct page *, enum lru_list lru);
51 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
52 extern void lru_cache_add_active_or_unevictable(struct page *,
53 @@ -228,9 +229,14 @@ static inline void lru_cache_add_active_
54 __lru_cache_add(page, LRU_ACTIVE_ANON);
57 +static inline void lru_cache_add_file_tail(struct page *page, int tail)
59 + ____lru_cache_add(page, LRU_INACTIVE_FILE, tail);
62 static inline void lru_cache_add_file(struct page *page)
64 - __lru_cache_add(page, LRU_INACTIVE_FILE);
65 + ____lru_cache_add(page, LRU_INACTIVE_FILE, 0);
68 static inline void lru_cache_add_active_file(struct page *page)
69 diff -uprN linux-2.6.28/mm/filemap.c linux-2.6.28.new/mm/filemap.c
70 --- linux-2.6.28/mm/filemap.c 2011-05-29 23:04:39.283792362 +0200
71 +++ linux-2.6.28.new/mm/filemap.c 2011-05-30 08:33:48.598237695 +0200
72 @@ -492,8 +492,8 @@ out:
74 EXPORT_SYMBOL(add_to_page_cache_locked);
76 -int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
77 - pgoff_t offset, gfp_t gfp_mask)
78 +int __add_to_page_cache_lru(struct page *page, struct address_space *mapping,
79 + pgoff_t offset, gfp_t gfp_mask, int tail)
83 @@ -509,13 +509,20 @@ int add_to_page_cache_lru(struct page *p
84 ret = add_to_page_cache(page, mapping, offset, gfp_mask);
86 if (page_is_file_cache(page))
87 - lru_cache_add_file(page);
88 + lru_cache_add_file_tail(page, tail);
90 lru_cache_add_active_anon(page);
95 +int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
96 + pgoff_t offset, gfp_t gfp_mask)
98 + return __add_to_page_cache_lru(page, mapping, offset, gfp_mask, 0);
103 struct page *__page_cache_alloc(gfp_t gfp)
105 diff -uprN linux-2.6.28/mm/readahead.c linux-2.6.28.new/mm/readahead.c
106 --- linux-2.6.28/mm/readahead.c 2011-05-29 13:07:40.956272555 +0200
107 +++ linux-2.6.28.new/mm/readahead.c 2011-05-30 09:41:46.938330739 +0200
109 #include <linux/task_io_accounting_ops.h>
110 #include <linux/pagevec.h>
111 #include <linux/pagemap.h>
112 +#include <linux/swap.h>
114 void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
116 @@ -83,7 +84,7 @@ int read_cache_pages(struct address_spac
117 EXPORT_SYMBOL(read_cache_pages);
119 static int read_pages(struct address_space *mapping, struct file *filp,
120 - struct list_head *pages, unsigned nr_pages)
121 + struct list_head *pages, unsigned nr_pages, int tail)
125 @@ -98,8 +99,8 @@ static int read_pages(struct address_spa
126 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
127 struct page *page = list_to_page(pages);
128 list_del(&page->lru);
129 - if (!add_to_page_cache_lru(page, mapping,
130 - page->index, GFP_KERNEL)) {
131 + if (!__add_to_page_cache_lru(page, mapping,
132 + page->index, GFP_KERNEL, tail)) {
133 mapping->a_ops->readpage(filp, page);
135 page_cache_release(page);
136 @@ -109,6 +110,28 @@ out:
140 +static inline int nr_mapped(void)
142 + return global_page_state(NR_FILE_MAPPED) +
143 + global_page_state(NR_ANON_PAGES);
147 + * This examines how large a file is in pages and returns 1 if it is
148 + * larger than half the unmapped RAM. Avoid the expensive page-state
149 + * lookups unless we already know the file is likely to be large enough.
151 +static int large_isize(unsigned long nr_pages)
153 + if (nr_pages * 6 > vm_total_pages) {
154 + unsigned long unmapped_ram = vm_total_pages - nr_mapped();
156 + if (nr_pages * 2 > unmapped_ram)
163 * do_page_cache_readahead actually reads a chunk of disk. It allocates all
164 * the pages first, then submits them all for I/O. This avoids the very bad
165 @@ -169,7 +192,8 @@ __do_page_cache_readahead(struct address
166 * will then handle the error.
169 - read_pages(mapping, filp, &page_pool, ret);
170 + read_pages(mapping, filp, &page_pool, ret,
171 + large_isize(end_index));
172 BUG_ON(!list_empty(&page_pool));
175 diff -uprN linux-2.6.28/mm/swap.c linux-2.6.28.new/mm/swap.c
176 --- linux-2.6.28/mm/swap.c 2011-05-29 23:04:39.291791521 +0200
177 +++ linux-2.6.28.new/mm/swap.c 2011-05-29 23:04:00.407680115 +0200
178 @@ -196,22 +196,33 @@ void mark_page_accessed(struct page *pag
180 EXPORT_SYMBOL(mark_page_accessed);
182 -void __lru_cache_add(struct page *page, enum lru_list lru)
183 +void ______pagevec_lru_add(struct pagevec *pvec, enum lru_list lru, int tail);
185 +void ____lru_cache_add(struct page *page, enum lru_list lru, int tail)
187 struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
189 page_cache_get(page);
190 if (!pagevec_add(pvec, page))
191 - ____pagevec_lru_add(pvec, lru);
192 + ______pagevec_lru_add(pvec, lru, tail);
193 put_cpu_var(lru_add_pvecs);
196 +EXPORT_SYMBOL(____lru_cache_add);
198 +void __lru_cache_add(struct page *page, enum lru_list lru)
200 + ____lru_cache_add(page, lru, 0);
203 +EXPORT_SYMBOL(__lru_cache_add);
206 * lru_cache_add_lru - add a page to a page list
207 * @page: the page to be added to the LRU.
208 * @lru: the LRU list to which the page is added.
210 -void lru_cache_add_lru(struct page *page, enum lru_list lru)
211 +void __lru_cache_add_lru(struct page *page, enum lru_list lru, int tail)
213 if (PageActive(page)) {
214 VM_BUG_ON(PageUnevictable(page));
215 @@ -222,7 +233,12 @@ void lru_cache_add_lru(struct page *page
218 VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
219 - __lru_cache_add(page, lru);
220 + ____lru_cache_add(page, lru, tail);
223 +void lru_cache_add_lru(struct page *page, enum lru_list lru)
225 + __lru_cache_add_lru(page, lru, 0);
229 @@ -423,7 +439,7 @@ void __pagevec_release_nonlru(struct pag
230 * Add the passed pages to the LRU, then drop the caller's refcount
231 * on them. Reinitialises the caller's pagevec.
233 -void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
234 +void ______pagevec_lru_add(struct pagevec *pvec, enum lru_list lru, int tail)
237 struct zone *zone = NULL;
238 @@ -450,7 +466,7 @@ void ____pagevec_lru_add(struct pagevec
240 zone->recent_rotated[file]++;
242 - add_page_to_lru_list(zone, page, lru);
243 + __add_page_to_lru_list(zone, page, lru, tail);
246 spin_unlock_irq(&zone->lru_lock);
247 @@ -458,6 +474,11 @@ void ____pagevec_lru_add(struct pagevec
248 pagevec_reinit(pvec);
251 +void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
253 + ______pagevec_lru_add(pvec, lru, 0);
256 EXPORT_SYMBOL(____pagevec_lru_add);