Add an "above background load" function which can be used by background
tasks elsewhere in the kernel (e.g. the VM).
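
As an illustrative sketch only (not part of this patch), a background
worker might consult the new function to back off while any CPU is busy
with foreground work; do_background_work() is an invented placeholder:

	if (above_background_load())
		/* A nice-0-or-better task is running somewhere; back off. */
		schedule_timeout_interruptible(HZ / 10);
	else
		do_background_work();	/* hypothetical helper */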

 include/linux/sched.h |    7 +++++++
 kernel/sched_bfs.c    |   20 ++++++++++++++++++++
 2 files changed, 27 insertions(+)

Index: linux-2.6.39-ck1/include/linux/sched.h
===================================================================
--- linux-2.6.39-ck1.orig/include/linux/sched.h	2011-05-19 19:36:35.115273667 +1000
+++ linux-2.6.39-ck1/include/linux/sched.h	2011-05-19 19:36:35.551273667 +1000
@@ -1409,6 +1409,7 @@ struct task_struct {
 #define tsk_seruntime(t)	((t)->sched_time)
 #define tsk_rttimeout(t)	((t)->rt_timeout)
 #define task_rq_unlock_wait(tsk)	grq_unlock_wait()
+extern int above_background_load(void);
 
 static inline void set_oom_timeslice(struct task_struct *p)
 {
@@ -1620,6 +1621,12 @@ static inline int iso_task(struct task_s
 {
 	return false;
 }
+
+/* Anyone feel like implementing this? */
+static inline int above_background_load(void)
+{
+	return 0;
+}
 #endif /* CONFIG_SCHED_BFS */

Index: linux-2.6.39-ck1/kernel/sched_bfs.c
===================================================================
--- linux-2.6.39-ck1.orig/kernel/sched_bfs.c	2011-05-19 19:36:35.121273667 +1000
+++ linux-2.6.39-ck1/kernel/sched_bfs.c	2011-05-19 19:36:35.553273667 +1000
@@ -563,6 +563,26 @@ static inline void __task_grq_unlock(voi
 	grq_unlock();
 }
 
+/*
+ * Look for any tasks *anywhere* that are running nice 0 or better. We do
+ * this lockless for overhead reasons since the occasional wrong result
+ * is harmless.
+ */
+int above_background_load(void)
+{
+	struct task_struct *cpu_curr;
+	unsigned long cpu;
+
+	for_each_online_cpu(cpu) {
+		cpu_curr = cpu_rq(cpu)->curr;
+		if (unlikely(!cpu_curr))
+			continue;
+		if (PRIO_TO_NICE(cpu_curr->static_prio) < 1)
+			return 1;
+	}
+	return 0;
+}
+
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
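
For reference, a worked sketch of the priority arithmetic behind the
PRIO_TO_NICE() test above, assuming the 2.6.39 definitions
(MAX_RT_PRIO == 100, PRIO_TO_NICE(prio) == (prio) - MAX_RT_PRIO - 20):

	/*
	 * A nice-0 task has static_prio == NICE_TO_PRIO(0) == 100 + 0 + 20 == 120,
	 * and PRIO_TO_NICE(120) == 120 - 100 - 20 == 0, which is < 1.
	 * A nice-1 task maps to 121, giving PRIO_TO_NICE() == 1, which is not.
	 * So the test fires exactly for tasks at nice 0 or better (nice <= 0).
	 */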