1 Make sure we disable preemption in try_to_wake_up and when changing the cpu
2 in set_cpus_allowed_ptr.
4 Rework the change in rr_interval with number of cpus to not go up quite so
5 quickly, scaling only by 50% every doubling of CPUs for better interactivity
6 on multicore machines. Throughput did not appear to decrease measurably with
12 include/linux/sched.h | 2 +-
13 kernel/sched_bfs.c | 14 ++++++++++++--
14 2 files changed, 13 insertions(+), 3 deletions(-)
16 Index: kernel-2.6.28/include/linux/sched.h
17 ===================================================================
18 --- kernel-2.6.28.orig/include/linux/sched.h
19 +++ kernel-2.6.28/include/linux/sched.h
20 @@ -1422,7 +1422,7 @@ static inline void tsk_cpus_current(stru
22 static inline void print_scheduler_version(void)
24 - printk(KERN_INFO"BFS CPU scheduler v0.316 by Con Kolivas ported by ToAsTcfh.\n");
25 + printk(KERN_INFO"BFS CPU scheduler v0.318 by Con Kolivas ported by ToAsTcfh.\n");
28 static inline int iso_task(struct task_struct *p)
29 Index: kernel-2.6.28/kernel/sched_bfs.c
30 ===================================================================
31 --- kernel-2.6.28.orig/kernel/sched_bfs.c
32 +++ kernel-2.6.28/kernel/sched_bfs.c
33 @@ -1193,6 +1193,8 @@ static int try_to_wake_up(struct task_st
39 /* This barrier is undocumented, probably for p->state? */
42 @@ -1227,6 +1229,8 @@ out_running:
43 p->state = TASK_RUNNING;
45 task_grq_unlock(&flags);
51 @@ -5748,7 +5752,7 @@ static int cache_cpu_idle(unsigned long
52 void __init sched_init_smp(void)
54 struct sched_domain *sd;
56 + int cpu, i, cpu_scale;
58 cpumask_t non_isolated_cpus;
60 @@ -5783,7 +5787,13 @@ void __init sched_init_smp(void)
61 * allowing us to increase the base rr_interval, but in a non linear
64 - rr_interval *= 1 + ilog2(num_online_cpus());
65 + cpu_scale = ilog2(num_online_cpus());
67 + for (i = 0; i < cpu_scale; i++) {