From 4c706ebf98c75b2a2106ad3a6e1dc8eba98136d4 Mon Sep 17 00:00:00 2001
From: Peter Hunt
Date: Mon, 30 May 2011 12:48:22 +0000
Subject: [PATCH] Updates to -ck series/patches to reflect BFS 404 changes;
 revert config to 100 Hz; tidy up class 10 SD fix and swap notify patch

---
 ...fs357-penalise_fork_depth_account_threads.patch |  282 --------------------
 .../debian/patches/class10sd_dto14_fix.diff        |   13 +-
 .../debian/patches/cpufreq-bfs_tweaks.patch        |   41 ---
 .../patches/mm-lru_cache_add_lru_tail-1.patch      |  258 ++++++++++++++++++
 .../debian/patches/mm-lru_cache_add_lru_tail.patch |  254 ------------------
 .../patch_swap_notify_core_support_2.6.28.diff     |   11 +-
 kernel-bfs-2.6.28/debian/patches/series            |    3 +-
 kernel-bfs-2.6.28/debian/rx51power_defconfig       |    6 +-
 8 files changed, 275 insertions(+), 593 deletions(-)
 delete mode 100644 kernel-bfs-2.6.28/debian/patches/bfs357-penalise_fork_depth_account_threads.patch
 delete mode 100644 kernel-bfs-2.6.28/debian/patches/cpufreq-bfs_tweaks.patch
 create mode 100644 kernel-bfs-2.6.28/debian/patches/mm-lru_cache_add_lru_tail-1.patch
 delete mode 100644 kernel-bfs-2.6.28/debian/patches/mm-lru_cache_add_lru_tail.patch

diff --git a/kernel-bfs-2.6.28/debian/patches/bfs357-penalise_fork_depth_account_threads.patch b/kernel-bfs-2.6.28/debian/patches/bfs357-penalise_fork_depth_account_threads.patch
deleted file mode 100644
index 077ee5e..0000000
--- a/kernel-bfs-2.6.28/debian/patches/bfs357-penalise_fork_depth_account_threads.patch
+++ /dev/null
@@ -1,282 +0,0 @@
-Make it possible to have interactivity and responsiveness at very high load
-levels by making deadlines offset by the fork depth from init. This has a
-similar effect to 'nice'ing loads that are fork heavy. 'make' is a perfect
-example of this and will, with fork_depth_penalty enabled, be felt as much
-at 'make -j24' as it normally would be with just 'make'.
-
-Note that this drastically affects CPU distribution, and also has the
-indirect side effect of partitioning CPU entitlement to different users as
-well. No assumption as to CPU distribution should be made based on past
-behaviour.
-
-This is achieved by separating out forks to new processes vs new threads.
-When a new process is detected, its fork depth is inherited from its parent
-across fork() and then is incremented by one. That fork_depth is then used
-to cause a relative offset of its deadline.
-
-This feature is enabled in this patch by default and can be optionally
-disabled.
-
-Threads are kept at the same fork_depth as their parent process, and can
-optionally have their CPU entitlement all managed as one process together
-by enabling the group_thread_accounting feature. This feature is disabled
-by default in this patch, as many desktop applications such as firefox,
-amarok, etc are multithreaded. By disabling this feature and enabling the
-fork_depth_penalty feature (default) it favours CPU towards desktop
-applications.
-
-Extensive testing is required to ensure this does not cause regressions in
-common workloads.
-
-There are two sysctls to enable/disable these features.
-
-They are in /proc/sys/kernel/
-
-group_thread_accounting - groups CPU accounting by threads
-fork_depth_penalty - penalises according to depth of forking from init
-
--ck
-
----
- include/linux/sched.h |    7 +++
- kernel/sched_bfs.c    |   88 ++++++++++++++++++++++++++++++++++++++++++++++----
- kernel/sysctl.c       |   20 +++++++++++
- 3 files changed, 108 insertions(+), 7 deletions(-)
-
-Index: linux-2.6.36-rc7-ck1/include/linux/sched.h
-===================================================================
---- linux-2.6.36-rc7-ck1.orig/include/linux/sched.h	2010-10-08 09:39:38.016240768 +1100
-+++ linux-2.6.36-rc7-ck1/include/linux/sched.h	2010-10-08 09:39:53.575007838 +1100
-@@ -1187,10 +1187,15 @@ struct task_struct {
- 	unsigned int rt_priority;
- #ifdef CONFIG_SCHED_BFS
- 	int time_slice;
--	u64 deadline;
-+	/* Virtual deadline in niffies, and when the deadline was set */
-+	u64 deadline, deadline_niffy;
- 	struct list_head run_list;
- 	u64 last_ran;
- 	u64 sched_time; /* sched_clock time spent running */
-+	/* Number of threads currently requesting CPU time */
-+	unsigned long threads_running;
-+	/* Depth of forks from init */
-+	int fork_depth;
- 
- 	unsigned long rt_timeout;
- #else /* CONFIG_SCHED_BFS */
-Index: linux-2.6.36-rc7-ck1/kernel/sched_bfs.c
-===================================================================
---- linux-2.6.36-rc7-ck1.orig/kernel/sched_bfs.c	2010-10-08 09:39:37.918242270 +1100
-+++ linux-2.6.36-rc7-ck1/kernel/sched_bfs.c	2010-10-08 11:16:01.382198622 +1100
-@@ -139,6 +139,15 @@ int rr_interval __read_mostly = 6;
- int sched_iso_cpu __read_mostly = 70;
- 
- /*
-+ * group_thread_accounting - sysctl to decide whether to treat whole thread
-+ * groups as a single entity for the purposes of CPU distribution.
-+ */
-+int group_thread_accounting __read_mostly;
-+
-+/* fork_depth_penalty - Whether to penalise CPU according to fork depth. */
-+int fork_depth_penalty __read_mostly = 1;
-+
-+/*
-  * The relative length of deadline for each priority(nice) level.
-  */
- static int prio_ratios[PRIO_RANGE] __read_mostly;
-@@ -661,11 +670,29 @@ static int isoprio_suitable(void)
- 	return !grq.iso_refractory;
- }
- 
-+static inline u64 __task_deadline_diff(struct task_struct *p);
-+static inline u64 task_deadline_diff(struct task_struct *p);
-+
- /*
-  * Adding to the global runqueue. Enter with grq locked.
-  */
- static void enqueue_task(struct task_struct *p)
- {
-+	s64 max_tdd = task_deadline_diff(p);
-+
-+	/*
-+	 * Make sure that when we're queueing this task again that it
-+	 * doesn't have any old deadlines from when the thread group was
-+	 * being penalised and cap the deadline to the highest it could
-+	 * be, based on the current number of threads running.
-+	 */
-+	if (group_thread_accounting) {
-+		max_tdd += p->group_leader->threads_running *
-+			   __task_deadline_diff(p);
-+	}
-+	if (p->deadline - p->deadline_niffy > max_tdd)
-+		p->deadline = p->deadline_niffy + max_tdd;
-+
- 	if (!rt_task(p)) {
- 		/* Check it hasn't gotten rt from PI */
- 		if ((idleprio_task(p) && idleprio_suitable(p)) ||
-@@ -967,10 +994,13 @@ static int effective_prio(struct task_st
- }
- 
- /*
-- * activate_task - move a task to the runqueue. Enter with grq locked.
-+ * activate_task - move a task to the runqueue. Enter with grq locked. The
-+ * number of threads running is stored in the group_leader struct.
-  */
- static void activate_task(struct task_struct *p, struct rq *rq)
- {
-+	unsigned long *threads_running = &p->group_leader->threads_running;
-+
- 	update_clocks(rq);
- 
- 	/*
-@@ -987,6 +1017,14 @@ static void activate_task(struct task_st
- 	p->prio = effective_prio(p);
- 	if (task_contributes_to_load(p))
- 		grq.nr_uninterruptible--;
-+	/*
-+	 * Adjust deadline according to number of running threads within
-+	 * this thread group. This ends up distributing CPU to the thread
-+	 * group as a single entity.
-+	 */
-+	++*threads_running;
-+	if (*threads_running > 1 && group_thread_accounting)
-+		p->deadline += __task_deadline_diff(p);
- 	enqueue_task(p);
- 	grq.nr_running++;
- 	inc_qnr();
-@@ -998,9 +1036,14 @@ static void activate_task(struct task_st
-  */
- static inline void deactivate_task(struct task_struct *p)
- {
-+	unsigned long *threads_running = &p->group_leader->threads_running;
-+
- 	if (task_contributes_to_load(p))
- 		grq.nr_uninterruptible++;
- 	grq.nr_running--;
-+	--*threads_running;
-+	if (*threads_running > 0 && group_thread_accounting)
-+		p->deadline -= __task_deadline_diff(p);
- }
- 
- #ifdef CONFIG_SMP
-@@ -1635,6 +1678,10 @@ void wake_up_new_task(struct task_struct
- 	parent = p->parent;
- 	/* Unnecessary but small chance that the parent changed CPU */
- 	set_task_cpu(p, task_cpu(parent));
-+	if (!(clone_flags & CLONE_THREAD)) {
-+		p->fork_depth++;
-+		p->threads_running = 0;
-+	}
- 	activate_task(p, rq);
- 	trace_sched_wakeup_new(p, 1);
- 	if (!(clone_flags & CLONE_VM) && rq->curr == parent &&
-@@ -2524,11 +2571,20 @@ static inline u64 prio_deadline_diff(int
- 	return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128));
- }
- 
--static inline u64 task_deadline_diff(struct task_struct *p)
-+static inline u64 __task_deadline_diff(struct task_struct *p)
- {
- 	return prio_deadline_diff(TASK_USER_PRIO(p));
- }
- 
-+static inline u64 task_deadline_diff(struct task_struct *p)
-+{
-+	u64 pdd = __task_deadline_diff(p);
-+
-+	if (fork_depth_penalty && p->fork_depth > 1)
-+		pdd *= p->fork_depth;
-+	return pdd;
-+}
-+
- static inline u64 static_deadline_diff(int static_prio)
- {
- 	return prio_deadline_diff(USER_PRIO(static_prio));
-@@ -2545,8 +2601,24 @@ static inline int ms_longest_deadline_di
-  */
- static void time_slice_expired(struct task_struct *p)
- {
-+	u64 tdd = task_deadline_diff(p);
-+
-+	/*
-+	 * We proportionately increase the deadline according to how many
-+	 * threads are running. This effectively makes a thread group have
-+	 * the same CPU as one task, no matter how many threads are running.
-+	 * time_slice_expired can be called when there may be none running
-+	 * when p is deactivated so we must explicitly test for more than 1.
-+	 */
-+	if (group_thread_accounting) {
-+		unsigned long *threads_running = &p->group_leader->threads_running;
-+
-+		if (*threads_running > 1)
-+			tdd += *threads_running * __task_deadline_diff(p);
-+	}
- 	p->time_slice = timeslice();
--	p->deadline = grq.niffies + task_deadline_diff(p);
-+	p->deadline_niffy = grq.niffies;
-+	p->deadline = grq.niffies + tdd;
- }
- 
- /*
-@@ -3513,7 +3585,7 @@ SYSCALL_DEFINE1(nice, int, increment)
-  *
-  * This is the priority value as seen by users in /proc.
-  * RT tasks are offset by -100. Normal tasks are centered around 1, value goes
-- * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO).
-+ * from 0 (SCHED_ISO) upwards (to nice +19 SCHED_IDLEPRIO).
-  */
- int task_prio(const struct task_struct *p)
- {
-@@ -3525,8 +3597,12 @@ int task_prio(const struct task_struct *
- 
- 	/* Convert to ms to avoid overflows */
- 	delta = NS_TO_MS(p->deadline - grq.niffies);
--	delta = delta * 40 / ms_longest_deadline_diff();
--	if (delta > 0 && delta <= 80)
-+	if (fork_depth_penalty)
-+		delta *= 4;
-+	else
-+		delta *= 40;
-+	delta /= ms_longest_deadline_diff();
-+	if (delta > 0)
- 		prio += delta;
- 	if (idleprio_task(p))
- 		prio += 40;
-Index: linux-2.6.36-rc7-ck1/kernel/sysctl.c
-===================================================================
---- linux-2.6.36-rc7-ck1.orig/kernel/sysctl.c	2010-10-08 09:39:11.603648964 +1100
-+++ linux-2.6.36-rc7-ck1/kernel/sysctl.c	2010-10-08 09:39:53.579007778 +1100
-@@ -121,6 +121,8 @@ static int __maybe_unused one_hundred =
- #ifdef CONFIG_SCHED_BFS
- extern int rr_interval;
- extern int sched_iso_cpu;
-+extern int group_thread_accounting;
-+extern int fork_depth_penalty;
- static int __read_mostly one_thousand = 1000;
- #endif
- #ifdef CONFIG_PRINTK
-@@ -834,6 +836,24 @@ static struct ctl_table kern_table[] = {
- 		.extra1		= &zero,
- 		.extra2		= &one_hundred,
- 	},
-+	{
-+		.procname	= "group_thread_accounting",
-+		.data		= &group_thread_accounting,
-+		.maxlen		= sizeof (int),
-+		.mode		= 0644,
-+		.proc_handler	= &proc_dointvec_minmax,
-+		.extra1		= &zero,
-+		.extra2		= &one,
-+	},
-+	{
-+		.procname	= "fork_depth_penalty",
-+		.data		= &fork_depth_penalty,
-+		.maxlen		= sizeof (int),
-+		.mode		= 0644,
-+		.proc_handler	= &proc_dointvec_minmax,
-+		.extra1		= &zero,
-+		.extra2		= &one,
-+	},
- #endif
- #if defined(CONFIG_S390) && defined(CONFIG_SMP)
- 	{
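The fork-depth heuristic that the removed patch above implemented is easiest to see with concrete numbers. The sketch below is a standalone userspace illustration only, not part of any patch here: the prio_ratio and interval values are invented, and the two helpers only loosely mirror the shape of __task_deadline_diff()/task_deadline_diff() from the deleted hunks.

/* Illustration only: how the (removed) fork-depth penalty stretched the
 * virtual deadline. Values are made up; the real scheduler works in
 * "niffies" and indexes prio_ratios[] by nice level. */
#include <stdio.h>
#include <stdint.h>

static const uint64_t rr_interval_ms = 6;   /* BFS default, per sched_bfs.c above */
static const uint64_t prio_ratio     = 128; /* illustrative nice-0 ratio          */

/* Rough analogue of __task_deadline_diff(): base offset for one task, in ns. */
static uint64_t base_deadline_diff(void)
{
	return prio_ratio * rr_interval_ms * 1000000ULL / 128;
}

/* Rough analogue of task_deadline_diff() with fork_depth_penalty enabled. */
static uint64_t deadline_diff(int fork_depth)
{
	uint64_t pdd = base_deadline_diff();

	if (fork_depth > 1)
		pdd *= (uint64_t)fork_depth;  /* deeper fork -> later deadline -> less CPU */
	return pdd;
}

int main(void)
{
	for (int depth = 1; depth <= 4; depth++)
		printf("fork depth %d -> deadline offset %llu ns\n",
		       depth, (unsigned long long)deadline_diff(depth));

	/* With group_thread_accounting, time_slice_expired() added roughly one
	 * extra base offset per runnable thread, so a whole thread group
	 * competed like a single task. */
	int threads_running = 4;
	printf("4-thread group -> extra offset %llu ns under group_thread_accounting\n",
	       (unsigned long long)(threads_running * base_deadline_diff()));
	return 0;
}

The point is simply that a task three forks away from init ended up with a deadline three times further out, which is why fork-heavy loads such as 'make -j24' behaved as if they had been niced.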
diff --git a/kernel-bfs-2.6.28/debian/patches/class10sd_dto14_fix.diff b/kernel-bfs-2.6.28/debian/patches/class10sd_dto14_fix.diff
index 45b8f25..300621b 100644
--- a/kernel-bfs-2.6.28/debian/patches/class10sd_dto14_fix.diff
+++ b/kernel-bfs-2.6.28/debian/patches/class10sd_dto14_fix.diff
@@ -1,14 +1,13 @@
 --- kernel-2.6.28/drivers/mmc/host/omap_hsmmc.c	2011-04-28 23:06:37.000000000 +0200
 +++ kernel-2.6.28.new/drivers/mmc/host/omap_hsmmc.c	2011-05-05 22:18:25.174891197 +0200
-@@ -958,34 +958,9 @@ static void set_data_timeout(struct omap
+@@ -958,36 +958,9 @@ static void set_data_timeout(struct omap
  	unsigned int timeout_ns,
  	unsigned int timeout_clks)
  {
 -	unsigned int timeout, cycle_ns;
 -	uint32_t reg, clkd, dto = 0;
-+	uint32_t reg, dto = 14;
- 
- 	reg = OMAP_HSMMC_READ(host->base, SYSCTL);
+-
+-	reg = OMAP_HSMMC_READ(host->base, SYSCTL);
 -	clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;
 -	if (clkd == 0)
 -		clkd = 1;
@@ -33,6 +32,10 @@
 -		dto = 14;
 -	}
 -
++	uint32_t reg = OMAP_HSMMC_READ(host->base, SYSCTL);
  	reg &= ~DTO_MASK;
- 	reg |= dto << DTO_SHIFT;
+-	reg |= dto << DTO_SHIFT;
++	reg |= DTO << DTO_SHIFT;
  	OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
+ }
+ 
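On the class10sd_dto14_fix.diff tidy-up above: dto is the data-timeout exponent field of the SYSCTL register, and 14 is the largest value the 4-bit field accepts, i.e. the longest timeout the controller offers, which is what slow class 10 cards need. The snippet below only illustrates the masked register update the simplified hunk performs; the DTO_SHIFT/DTO_MASK values and the 2^(DTO+13)-cycle reading are assumptions from my understanding of the OMAP3 HSMMC documentation, not something this patch states, so check them against the TRM.

/* Illustration only: the masked DTO update done by the simplified hunk.
 * DTO_SHIFT/DTO_MASK and the 2^(DTO+13) formula are assumed, not taken
 * from this patch. */
#include <stdio.h>
#include <stdint.h>

#define DTO_SHIFT 16                  /* assumed: SYSCTL.DTO lives in bits 19:16 */
#define DTO_MASK  (0xFU << DTO_SHIFT)
#define DTO       14U                 /* largest 4-bit value -> longest timeout  */

int main(void)
{
	uint32_t sysctl = 0x00003C07;     /* made-up current SYSCTL value */

	sysctl = (sysctl & ~DTO_MASK) | (DTO << DTO_SHIFT);
	printf("SYSCTL after update: 0x%08X\n", (unsigned)sysctl);
	printf("data timeout ~= %llu timeout-clock cycles (assumed 2^(DTO+13))\n",
	       (unsigned long long)(1ULL << (DTO + 13)));
	return 0;
}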
diff --git a/kernel-bfs-2.6.28/debian/patches/cpufreq-bfs_tweaks.patch b/kernel-bfs-2.6.28/debian/patches/cpufreq-bfs_tweaks.patch
deleted file mode 100644
index 877b6bf..0000000
--- a/kernel-bfs-2.6.28/debian/patches/cpufreq-bfs_tweaks.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-Because of the way BFS works it needs to transition up in frequency more
-aggressively and down more conservatively.
-
--ck
-
----
- drivers/cpufreq/cpufreq_ondemand.c |   10 +++++-----
- 1 file changed, 5 insertions(+), 5 deletions(-)
-
-Index: linux-2.6.34-ck1/drivers/cpufreq/cpufreq_ondemand.c
-===================================================================
---- linux-2.6.34-ck1.orig/drivers/cpufreq/cpufreq_ondemand.c	2010-02-25 21:51:48.000000000 +1100
-+++ linux-2.6.34-ck1/drivers/cpufreq/cpufreq_ondemand.c	2010-05-18 12:26:18.124319654 +1000
-@@ -28,10 +28,10 @@
-  * It helps to keep variable names smaller, simpler
-  */
- 
---#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
---#define DEF_FREQUENCY_UP_THRESHOLD		(80)
--+#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(17)
--+#define DEF_FREQUENCY_UP_THRESHOLD		(63)
- #define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
---#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
--+#define MICRO_FREQUENCY_UP_THRESHOLD		(80)
- #define MIN_FREQUENCY_UP_THRESHOLD		(11)
- #define MAX_FREQUENCY_UP_THRESHOLD		(100)
- 
-@@ -455,10 +455,10 @@ static void dbs_check_cpu(struct cpu_dbs
- 
- 	/*
- 	 * Every sampling_rate, we check, if current idle time is less
--	 * than 20% (default), then we try to increase frequency
-+	 * than 37% (default), then we try to increase frequency
- 	 * Every sampling_rate, we look for a the lowest
- 	 * frequency which can sustain the load while keeping idle time over
--	 * 30%. If such a frequency exist, we try to decrease to this frequency.
-+	 * 50%. If such a frequency exist, we try to decrease to this frequency.
- 	 *
- 	 * Any frequency increase takes it to the maximum frequency.
- 	 * Frequency reduction happens at minimum steps of
-
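The constants in the removed cpufreq tweak read more naturally as idle-time percentages: an up threshold of 63 means the governor ramps up as soon as idle time falls below 100 - 63 = 37%, and a down differential of 17 means it only steps down to a frequency that keeps idle time above roughly 100 - (63 - 17) = 54% (the patch's own comment rounds this to 50%). A throwaway sketch of that arithmetic — the struct and names are illustrative, not the governor's own:

/* Illustration only: what the ondemand thresholds in the removed tweak
 * meant in terms of idle time. */
#include <stdio.h>

struct thresholds {
	int up_threshold;       /* ramp up when busy%% exceeds this            */
	int down_differential;  /* hysteresis subtracted when scaling down     */
};

static void explain(const char *label, struct thresholds t)
{
	printf("%s: scale up when idle < %d%%, scale down while idle stays above ~%d%%\n",
	       label, 100 - t.up_threshold,
	       100 - (t.up_threshold - t.down_differential));
}

int main(void)
{
	explain("mainline ondemand (80/10)", (struct thresholds){ 80, 10 });
	explain("BFS tweak, removed here (63/17)", (struct thresholds){ 63, 17 });
	return 0;
}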
diff --git a/kernel-bfs-2.6.28/debian/patches/mm-lru_cache_add_lru_tail-1.patch b/kernel-bfs-2.6.28/debian/patches/mm-lru_cache_add_lru_tail-1.patch
new file mode 100644
index 0000000..08b1afb
--- /dev/null
+++ b/kernel-bfs-2.6.28/debian/patches/mm-lru_cache_add_lru_tail-1.patch
@@ -0,0 +1,258 @@
+diff -uprN linux-2.6.28/include/linux/mm_inline.h linux-2.6.28.new/include/linux/mm_inline.h
+--- linux-2.6.28/include/linux/mm_inline.h	2011-05-29 23:04:39.279792774 +0200
++++ linux-2.6.28.new/include/linux/mm_inline.h	2011-05-29 22:57:41.749553427 +0200
+@@ -24,13 +24,23 @@ static inline int page_is_file_cache(str
+ }
+ 
+ static inline void
+-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
++__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l, int tail)
+ {
+-	list_add(&page->lru, &zone->lru[l].list);
++	/* See if this should be added to the tail of this lru list */
++	if (tail)
++		list_add_tail(&page->lru, &zone->lru[l].list);
++	else
++		list_add(&page->lru, &zone->lru[l].list);
+ 	__inc_zone_state(zone, NR_LRU_BASE + l);
+ }
+ 
+ static inline void
++add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
++{
++	__add_page_to_lru_list(zone, page, l, 0);
++}
++
++static inline void
+ del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
+ {
+ 	list_del(&page->lru);
+diff -uprN linux-2.6.28/include/linux/pagemap.h linux-2.6.28.new/include/linux/pagemap.h
+--- linux-2.6.28/include/linux/pagemap.h	2011-05-29 13:06:56.528715604 +0200
++++ linux-2.6.28.new/include/linux/pagemap.h	2011-05-29 13:15:14.226937454 +0200
+@@ -438,6 +438,8 @@ int add_to_page_cache_locked(struct page
+ 		pgoff_t index, gfp_t gfp_mask);
+ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+ 		pgoff_t index, gfp_t gfp_mask);
++int __add_to_page_cache_lru(struct page *page, struct address_space *mapping,
++		pgoff_t offset, gfp_t gfp_mask, int tail);
+ extern void remove_from_page_cache(struct page *page);
+ extern void __remove_from_page_cache(struct page *page);
+ 
+diff -uprN linux-2.6.28/include/linux/swap.h linux-2.6.28.new/include/linux/swap.h
+--- linux-2.6.28/include/linux/swap.h	2011-05-29 23:04:39.283792362 +0200
++++ linux-2.6.28.new/include/linux/swap.h	2011-05-29 13:14:05.101850740 +0200
+@@ -201,6 +201,7 @@ extern unsigned int nr_free_pagecache_pa
+ 
+ 
+ /* linux/mm/swap.c */
++extern void ____lru_cache_add(struct page *, enum lru_list lru, int tail);
+ extern void __lru_cache_add(struct page *, enum lru_list lru);
+ extern void lru_cache_add_lru(struct page *, enum lru_list lru);
+ extern void lru_cache_add_active_or_unevictable(struct page *,
+@@ -228,9 +229,14 @@ static inline void lru_cache_add_active_
+ 	__lru_cache_add(page, LRU_ACTIVE_ANON);
+ }
+ 
++static inline void lru_cache_add_file_tail(struct page *page, int tail)
++{
++	____lru_cache_add(page, LRU_INACTIVE_FILE, tail);
++}
++
+ static inline void lru_cache_add_file(struct page *page)
+ {
+-	__lru_cache_add(page, LRU_INACTIVE_FILE);
++	____lru_cache_add(page, LRU_INACTIVE_FILE, 0);
+ }
+ 
+ static inline void lru_cache_add_active_file(struct page *page)
+diff -uprN linux-2.6.28/mm/filemap.c linux-2.6.28.new/mm/filemap.c
+--- linux-2.6.28/mm/filemap.c	2011-05-29 23:04:39.283792362 +0200
++++ linux-2.6.28.new/mm/filemap.c	2011-05-30 08:33:48.598237695 +0200
+@@ -492,8 +492,8 @@ out:
+ }
+ EXPORT_SYMBOL(add_to_page_cache_locked);
+ 
+-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+-				pgoff_t offset, gfp_t gfp_mask)
++int __add_to_page_cache_lru(struct page *page, struct address_space *mapping,
++				pgoff_t offset, gfp_t gfp_mask, int tail)
+ {
+ 	int ret;
+ 
+@@ -509,13 +509,20 @@ int add_to_page_cache_lru(struct page *p
+ 	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
+ 	if (ret == 0) {
+ 		if (page_is_file_cache(page))
+-			lru_cache_add_file(page);
++			lru_cache_add_file_tail(page, tail);
+ 		else
+ 			lru_cache_add_active_anon(page);
+ 	}
+ 	return ret;
+ }
+ 
++int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
++				pgoff_t offset, gfp_t gfp_mask)
++{
++	return __add_to_page_cache_lru(page, mapping, offset, gfp_mask, 0);
++}
++
++
+ #ifdef CONFIG_NUMA
+ struct page *__page_cache_alloc(gfp_t gfp)
+ {
+diff -uprN linux-2.6.28/mm/readahead.c linux-2.6.28.new/mm/readahead.c
+--- linux-2.6.28/mm/readahead.c	2011-05-29 13:07:40.956272555 +0200
++++ linux-2.6.28.new/mm/readahead.c	2011-05-30 09:41:46.938330739 +0200
+@@ -16,6 +16,7 @@
+ #include
+ #include
+ #include
++#include
+ 
+ void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
+ {
+@@ -83,7 +84,7 @@ int read_cache_pages(struct address_spac
+ EXPORT_SYMBOL(read_cache_pages);
+ 
+ static int read_pages(struct address_space *mapping, struct file *filp,
+-		struct list_head *pages, unsigned nr_pages)
++		struct list_head *pages, unsigned nr_pages, int tail)
+ {
+ 	unsigned page_idx;
+ 	int ret;
+@@ -98,8 +99,8 @@ static int read_pages(struct address_spa
+ 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
+ 		struct page *page = list_to_page(pages);
+ 		list_del(&page->lru);
+-		if (!add_to_page_cache_lru(page, mapping,
+-					page->index, GFP_KERNEL)) {
++		if (!__add_to_page_cache_lru(page, mapping,
++					page->index, GFP_KERNEL, tail)) {
+ 			mapping->a_ops->readpage(filp, page);
+ 		}
+ 		page_cache_release(page);
+@@ -109,6 +110,28 @@ out:
+ 	return ret;
+ }
+ 
++static inline int nr_mapped(void)
++{
++	return global_page_state(NR_FILE_MAPPED) +
++		global_page_state(NR_ANON_PAGES);
++}
++
++/*
++ * This examines how large in pages a file size is and returns 1 if it is
++ * more than half the unmapped ram. Avoid doing read_page_state which is
++ * expensive unless we already know it is likely to be large enough.
++ */
++static int large_isize(unsigned long nr_pages)
++{
++	if (nr_pages * 6 > vm_total_pages) {
++		unsigned long unmapped_ram = vm_total_pages - nr_mapped();
++
++		if (nr_pages * 2 > unmapped_ram)
++			return 1;
++	}
++	return 0;
++}
++
+ /*
+  * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
+  * the pages first, then submits them all for I/O. This avoids the very bad
+@@ -169,7 +192,8 @@ __do_page_cache_readahead(struct address
+ 	 * will then handle the error.
+ 	 */
+ 	if (ret)
+-		read_pages(mapping, filp, &page_pool, ret);
++		read_pages(mapping, filp, &page_pool, ret,
++				large_isize(end_index));
+ 	BUG_ON(!list_empty(&page_pool));
+ out:
+ 	return ret;
+diff -uprN linux-2.6.28/mm/swap.c linux-2.6.28.new/mm/swap.c
+--- linux-2.6.28/mm/swap.c	2011-05-29 23:04:39.291791521 +0200
++++ linux-2.6.28.new/mm/swap.c	2011-05-29 23:04:00.407680115 +0200
+@@ -196,22 +196,33 @@ void mark_page_accessed(struct page *pag
+ 
+ EXPORT_SYMBOL(mark_page_accessed);
+ 
+-void __lru_cache_add(struct page *page, enum lru_list lru)
++void ______pagevec_lru_add(struct pagevec *pvec, enum lru_list lru, int tail);
++
++void ____lru_cache_add(struct page *page, enum lru_list lru, int tail)
+ {
+ 	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
+ 
+ 	page_cache_get(page);
+ 	if (!pagevec_add(pvec, page))
+-		____pagevec_lru_add(pvec, lru);
++		______pagevec_lru_add(pvec, lru, tail);
+ 	put_cpu_var(lru_add_pvecs);
+ }
+ 
++EXPORT_SYMBOL(____lru_cache_add);
++
++void __lru_cache_add(struct page *page, enum lru_list lru)
++{
++	____lru_cache_add(page, lru, 0);
++}
++
++EXPORT_SYMBOL(__lru_cache_add);
++
+ /**
+  * lru_cache_add_lru - add a page to a page list
+  * @page: the page to be added to the LRU.
+  * @lru: the LRU list to which the page is added.
+  */
+-void lru_cache_add_lru(struct page *page, enum lru_list lru)
++void __lru_cache_add_lru(struct page *page, enum lru_list lru, int tail)
+ {
+ 	if (PageActive(page)) {
+ 		VM_BUG_ON(PageUnevictable(page));
+@@ -222,7 +233,12 @@ void lru_cache_add_lru(struct page *page
+ 	}
+ 
+ 	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
+-	__lru_cache_add(page, lru);
++	____lru_cache_add(page, lru, tail);
++}
++
++void lru_cache_add_lru(struct page *page, enum lru_list lru)
++{
++	__lru_cache_add_lru(page, lru, 0);
+ }
+ 
+ /**
+@@ -423,7 +439,7 @@ void __pagevec_release_nonlru(struct pag
+  * Add the passed pages to the LRU, then drop the caller's refcount
+  * on them.  Reinitialises the caller's pagevec.
+  */
+-void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
++void ______pagevec_lru_add(struct pagevec *pvec, enum lru_list lru, int tail)
+ {
+ 	int i;
+ 	struct zone *zone = NULL;
+ 
+@@ -450,7 +466,7 @@ void ____pagevec_lru_add(struct pagevec
+ 			SetPageActive(page);
+ 			zone->recent_rotated[file]++;
+ 		}
+-		add_page_to_lru_list(zone, page, lru);
++		__add_page_to_lru_list(zone, page, lru, tail);
+ 	}
+ 	if (zone)
+ 		spin_unlock_irq(&zone->lru_lock);
+@@ -458,6 +474,11 @@ void ____pagevec_lru_add(struct pagevec
+ 	pagevec_reinit(pvec);
+ }
+ 
++void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
++{
++	______pagevec_lru_add(pvec, lru, 0);
++}
++
+ EXPORT_SYMBOL(____pagevec_lru_add);
+ 
+ /*
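The heuristic in large_isize() above is worth checking with concrete numbers: a readahead batch only goes to the tail of the inactive list when the file is bigger than a sixth of total RAM (the cheap pre-check) and bigger than half of the RAM that is not already mapped. Standalone sketch with invented page counts — in the kernel these come from vm_total_pages and global_page_state(), not locals:

/* Illustration only: the large_isize() decision from the patch above,
 * fed with made-up page counts. */
#include <stdio.h>

static unsigned long vm_total_pages = 65536;   /* e.g. 256 MB of 4 KB pages */
static unsigned long mapped_pages   = 30000;   /* anon + file-mapped pages  */

static int large_isize(unsigned long nr_pages)
{
	if (nr_pages * 6 > vm_total_pages) {            /* cheap pre-check        */
		unsigned long unmapped_ram = vm_total_pages - mapped_pages;

		if (nr_pages * 2 > unmapped_ram)        /* > half of unmapped RAM */
			return 1;
	}
	return 0;
}

int main(void)
{
	unsigned long sizes[] = { 2560, 12800, 25600 }; /* ~10 MB, ~50 MB, ~100 MB */

	for (int i = 0; i < 3; i++)
		printf("%lu pages -> %s\n", sizes[i],
		       large_isize(sizes[i]) ? "add to LRU tail" : "add to LRU head");
	return 0;
}

With these numbers only the 100 MB file is pushed to the tail, which is the intended behaviour: files too big to ever be fully cached become the first thing reclaimed.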
diff --git a/kernel-bfs-2.6.28/debian/patches/mm-lru_cache_add_lru_tail.patch b/kernel-bfs-2.6.28/debian/patches/mm-lru_cache_add_lru_tail.patch
deleted file mode 100644
index 6509618..0000000
--- a/kernel-bfs-2.6.28/debian/patches/mm-lru_cache_add_lru_tail.patch
+++ /dev/null
@@ -1,254 +0,0 @@
-When reading from large files through the generic file read functions into
-page cache we can detect when a file is so large that it is unlikely to be
-fully cached in ram. If that happens we can put it on the tail end of the
-inactive lru list so it can be the first thing evicted next time we need ram.
-
-Do lots of funny buggers with underscores to preserve all the existing APIs.
-
--ck
-
----
- include/linux/mm_inline.h |   14 ++++++++++--
- include/linux/swap.h      |    5 ++--
- mm/filemap.c              |   51 +++++++++++++++++++++++++++++++++++++++-------
- mm/swap.c                 |   29 ++++++++++++++++++++------
- 4 files changed, 82 insertions(+), 17 deletions(-)
-
-Index: linux-2.6.32-ck1/include/linux/mm_inline.h
-===================================================================
---- linux-2.6.32-ck1.orig/include/linux/mm_inline.h	2009-12-10 20:47:13.927251742 +1100
-+++ linux-2.6.32-ck1/include/linux/mm_inline.h	2009-12-10 22:45:33.041376670 +1100
-@@ -20,13 +20,23 @@ static inline int page_is_file_cache(str
- }
- 
- static inline void
--add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
-+__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l, int tail)
- {
--	list_add(&page->lru, &zone->lru[l].list);
-+	/* See if this should be added to the tail of this lru list */
-+	if (tail)
-+		list_add_tail(&page->lru, &zone->lru[l].list);
-+	else
-+		list_add(&page->lru, &zone->lru[l].list);
- 	__inc_zone_state(zone, NR_LRU_BASE + l);
- }
- 
- static inline void
-+add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
-+{
-+	__add_page_to_lru_list(zone, page, l, 0);
-+}
-+
-+static inline void
- del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
- {
- 	list_del(&page->lru);
-Index: linux-2.6.32-ck1/include/linux/swap.h
-===================================================================
---- linux-2.6.32-ck1.orig/include/linux/swap.h	2009-12-10 20:45:55.306251536 +1100
-+++ linux-2.6.32-ck1/include/linux/swap.h	2009-12-10 22:47:38.415251020 +1100
-@@ -198,6 +198,7 @@ extern unsigned int nr_free_pagecache_pa
- 
- 
- /* linux/mm/swap.c */
-+extern void ____lru_cache_add(struct page *, enum lru_list lru, int tail);
- extern void __lru_cache_add(struct page *, enum lru_list lru);
- extern void lru_cache_add_lru(struct page *, enum lru_list lru);
- extern void activate_page(struct page *);
-@@ -223,9 +224,9 @@ static inline void lru_cache_add_active_
- 	__lru_cache_add(page, LRU_ACTIVE_ANON);
- }
- 
--static inline void lru_cache_add_file(struct page *page)
-+static inline void lru_cache_add_file(struct page *page, int tail)
- {
--	__lru_cache_add(page, LRU_INACTIVE_FILE);
-+	____lru_cache_add(page, LRU_INACTIVE_FILE, tail);
- }
- 
- static inline void lru_cache_add_active_file(struct page *page)
-Index: linux-2.6.32-ck1/mm/filemap.c
-===================================================================
---- linux-2.6.32-ck1.orig/mm/filemap.c	2009-12-10 20:52:17.597126805 +1100
-+++ linux-2.6.32-ck1/mm/filemap.c	2009-12-10 22:41:11.812251151 +1100
-@@ -454,8 +454,8 @@ out:
- }
- EXPORT_SYMBOL(add_to_page_cache_locked);
- 
--int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
--				pgoff_t offset, gfp_t gfp_mask)
-+int __add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-+				pgoff_t offset, gfp_t gfp_mask, int tail)
- {
- 	int ret;
- 
-@@ -471,12 +471,19 @@ int add_to_page_cache_lru(struct page *p
- 	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
- 	if (ret == 0) {
- 		if (page_is_file_cache(page))
--			lru_cache_add_file(page);
-+			lru_cache_add_file(page, tail);
- 		else
- 			lru_cache_add_active_anon(page);
- 	}
- 	return ret;
- }
-+
-+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-+				pgoff_t offset, gfp_t gfp_mask)
-+{
-+	return __add_to_page_cache_lru(page, mapping, offset, gfp_mask, 0);
-+}
-+
- 
- #ifdef CONFIG_NUMA
- struct page *__page_cache_alloc(gfp_t gfp)
-@@ -970,6 +977,28 @@ static void shrink_readahead_size_eio(st
- }
- EXPORT_SYMBOL(find_get_pages);
- 
-+static inline int nr_mapped(void)
-+{
-+	return global_page_state(NR_FILE_MAPPED) +
-+		global_page_state(NR_ANON_PAGES);
-+}
-+
-+/*
-+ * This examines how large in pages a file size is and returns 1 if it is
-+ * more than half the unmapped ram. Avoid doing read_page_state which is
-+ * expensive unless we already know it is likely to be large enough.
-+ */
-+static int large_isize(unsigned long nr_pages)
-+{
-+	if (nr_pages * 6 > vm_total_pages) {
-+		unsigned long unmapped_ram = vm_total_pages - nr_mapped();
-+
-+		if (nr_pages * 2 > unmapped_ram)
-+			return 1;
-+	}
-+	return 0;
-+}
-+
- /**
-  * do_generic_file_read - generic file read routine
-  * @filp:	the file to read
-@@ -994,7 +1023,7 @@ static void do_generic_file_read(struct
- 	pgoff_t prev_index;
- 	unsigned long offset;      /* offset into pagecache page */
- 	unsigned int prev_offset;
--	int error;
-+	int error, tail = 0;
- 
- 	index = *ppos >> PAGE_CACHE_SHIFT;
- 	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
-@@ -1005,7 +1034,7 @@ static void do_generic_file_read(struct
- 	for (;;) {
- 		struct page *page;
- 		pgoff_t end_index;
--		loff_t isize;
-+		loff_t isize = 0;
- 		unsigned long nr, ret;
- 
- 		cond_resched();
-@@ -1170,8 +1199,16 @@ no_cached_page:
- 			desc->error = -ENOMEM;
- 			goto out;
- 		}
--		error = add_to_page_cache_lru(page, mapping,
--						index, GFP_KERNEL);
-+		/*
-+		 * If we know the file is large we add the pages read to the
-+		 * end of the lru as we're unlikely to be able to cache the
-+		 * whole file in ram so make those pages the first to be
-+		 * dropped if not referenced soon.
-+		 */
-+		if (large_isize(end_index))
-+			tail = 1;
-+		error = __add_to_page_cache_lru(page, mapping,
-+						index, GFP_KERNEL, tail);
- 		if (error) {
- 			page_cache_release(page);
- 			if (error == -EEXIST)
-Index: linux-2.6.32-ck1/mm/swap.c
-===================================================================
---- linux-2.6.32-ck1.orig/mm/swap.c	2009-12-10 20:45:55.320251262 +1100
-+++ linux-2.6.32-ck1/mm/swap.c	2009-12-10 22:53:10.138009481 +1100
-@@ -214,22 +214,29 @@ void mark_page_accessed(struct page *pag
- 
- EXPORT_SYMBOL(mark_page_accessed);
- 
--void __lru_cache_add(struct page *page, enum lru_list lru)
-+void ______pagevec_lru_add(struct pagevec *pvec, enum lru_list lru, int tail);
-+
-+void ____lru_cache_add(struct page *page, enum lru_list lru, int tail)
- {
- 	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
- 
- 	page_cache_get(page);
- 	if (!pagevec_add(pvec, page))
--		____pagevec_lru_add(pvec, lru);
-+		______pagevec_lru_add(pvec, lru, tail);
- 	put_cpu_var(lru_add_pvecs);
- }
- 
-+void __lru_cache_add(struct page *page, enum lru_list lru)
-+{
-+	____lru_cache_add(page, lru, 0);
-+}
-+
- /**
-  * lru_cache_add_lru - add a page to a page list
-  * @page: the page to be added to the LRU.
-  * @lru: the LRU list to which the page is added.
-  */
--void lru_cache_add_lru(struct page *page, enum lru_list lru)
-+void __lru_cache_add_lru(struct page *page, enum lru_list lru, int tail)
- {
- 	if (PageActive(page)) {
- 		VM_BUG_ON(PageUnevictable(page));
-@@ -240,7 +247,12 @@ void lru_cache_add_lru(struct page *page
- 	}
- 
- 	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
--	__lru_cache_add(page, lru);
-+	____lru_cache_add(page, lru, tail);
-+}
-+
-+void lru_cache_add_lru(struct page *page, enum lru_list lru)
-+{
-+	__lru_cache_add_lru(page, lru, 0);
- }
- 
- /**
-@@ -400,7 +412,7 @@ EXPORT_SYMBOL(__pagevec_release);
-  * Add the passed pages to the LRU, then drop the caller's refcount
-  * on them.  Reinitialises the caller's pagevec.
-  */
---void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
--+void ______pagevec_lru_add(struct pagevec *pvec, enum lru_list lru, int tail)
- {
- 	int i;
- 	struct zone *zone = NULL;
-@@ -428,7 +440,7 @@ void ____pagevec_lru_add(struct pagevec
- 			SetPageActive(page);
- 			zone->recent_rotated[file]++;
- 		}
--		add_page_to_lru_list(zone, page, lru);
-+		__add_page_to_lru_list(zone, page, lru, tail);
- 	}
- 	if (zone)
- 		spin_unlock_irq(&zone->lru_lock);
-@@ -436,6 +448,11 @@ void ____pagevec_lru_add(struct pagevec
- 	pagevec_reinit(pvec);
- }
- 
-+void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
-+{
-+	______pagevec_lru_add(pvec, lru, 0);
-+}
-+
- EXPORT_SYMBOL(____pagevec_lru_add);
- 
- /*
-
diff --git a/kernel-bfs-2.6.28/debian/patches/patch_swap_notify_core_support_2.6.28.diff b/kernel-bfs-2.6.28/debian/patches/patch_swap_notify_core_support_2.6.28.diff
index 2a66eb1..d681471 100644
--- a/kernel-bfs-2.6.28/debian/patches/patch_swap_notify_core_support_2.6.28.diff
+++ b/kernel-bfs-2.6.28/debian/patches/patch_swap_notify_core_support_2.6.28.diff
@@ -61,7 +61,7 @@ diff -uprN linux-2.6.28/mm/swapfile.c linux-2.6.28.new/mm/swapfile.c
 
 	if (count >= SWAP_MAP_MAX)
 		return count;
-@@ -283,28 +296,40 @@ static int swap_entry_free(struct swap_i
+@@ -283,28 +295,39 @@ static int swap_entry_free(struct swap_i
 	if (count)
 		return count;
 
@@ -76,16 +76,15 @@ diff -uprN linux-2.6.28/mm/swapfile.c linux-2.6.28.new/mm/swapfile.c
 -	swap_list.next = p - swap_info;
 -	nr_swap_pages++;
 -	p->inuse_pages--;
-+	if (p->swap_remap) {
-+		spin_lock(&p->remap_lock);
-+		swap_entry_update(p, offset);
-+	}
-+	else {
++	if (!p->swap_remap) {
 +		swap_entry_update(p, offset);
 +		if (disk->fops->swap_slot_free_notify)
 +			disk->fops->swap_slot_free_notify(p->bdev, offset);
 +		return 0;
 +	}
++
++	spin_lock(&p->remap_lock);
++	swap_entry_update(p, offset);
 +
 +	/* Re-map the page number */
 +	old = p->swap_remap[offset] & 0x7FFFFFFF;
diff --git a/kernel-bfs-2.6.28/debian/patches/series b/kernel-bfs-2.6.28/debian/patches/series
index 3339ffd..f07fad5 100644
--- a/kernel-bfs-2.6.28/debian/patches/series
+++ b/kernel-bfs-2.6.28/debian/patches/series
@@ -42,9 +42,8 @@ mm-lots_watermark.diff
 mm-kswapd_inherit_prio-1.patch
 mm-idleprio_prio-1.patch
 mm-background_scan-2.patch
-mm-lru_cache_add_lru_tail.patch
+mm-lru_cache_add_lru_tail-1.patch
 hz-raise_max.patch
-cpufreq-bfs_tweaks.patch
 voltage_scaling_1.diff
 voltage_scaling_0.diff
 arm-proc-v7.diff
diff --git a/kernel-bfs-2.6.28/debian/rx51power_defconfig b/kernel-bfs-2.6.28/debian/rx51power_defconfig
index af3f360..9f721b8 100644
--- a/kernel-bfs-2.6.28/debian/rx51power_defconfig
+++ b/kernel-bfs-2.6.28/debian/rx51power_defconfig
@@ -296,7 +296,7 @@ CONFIG_VMSPLIT_3G=y
 # CONFIG_VMSPLIT_1G is not set
 CONFIG_PAGE_OFFSET=0xC0000000
 CONFIG_PREEMPT=y
-CONFIG_HZ=300
+CONFIG_HZ=100
 CONFIG_AEABI=y
 # CONFIG_OABI_COMPAT is not set
 CONFIG_ARCH_FLATMEM_HAS_HOLES=y
@@ -1554,8 +1554,8 @@ CONFIG_FRAMEBUFFER_CONSOLE=m
 # CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
 # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
 # CONFIG_FONTS is not set
-CONFIG_FONT_8x8=m
-CONFIG_FONT_8x16=m
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
 # CONFIG_LOGO is not set
 CONFIG_SOUND=y
 # CONFIG_SOUND_OSS_CORE is not set
-- 
1.7.9.5
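A closing note on the defconfig hunk: the only change there with a behavioural story is CONFIG_HZ going back from 300 to 100. The tick stretches from ~3.3 ms to 10 ms, which is coarser than the 6 ms rr_interval BFS defaults to (see the sched_bfs.c context above), so in practice a timeslice is only noticed as expired at the next 10 ms tick. Quick arithmetic, plain userspace C rather than kernel code:

/* Illustration only: tick period vs BFS rr_interval for the two HZ values. */
#include <stdio.h>

int main(void)
{
	const int hz_values[] = { 300, 100 };   /* previous and reverted CONFIG_HZ */
	const double rr_interval_ms = 6.0;      /* BFS default, from the hunks above */

	for (int i = 0; i < 2; i++) {
		double tick_ms = 1000.0 / hz_values[i];
		printf("HZ=%d: tick every %.1f ms, ~%.1f ticks per 6 ms timeslice\n",
		       hz_values[i], tick_ms, rr_interval_ms / tick_ms);
	}
	return 0;
}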