kernel-bfs-2.6.28/debian/patches/sched-add-above-background-load-function.patch
Add an "above background load" function which can be used elsewhere (e.g. by
the VM) to gate purely background tasks. A usage sketch follows the patch below.

-ck
---
 include/linux/sched.h |    7 +++++++
 kernel/sched_bfs.c    |   20 ++++++++++++++++++++
 2 files changed, 27 insertions(+)

Index: linux-2.6.39-ck1/include/linux/sched.h
===================================================================
--- linux-2.6.39-ck1.orig/include/linux/sched.h 2011-05-19 19:36:35.115273667 +1000
+++ linux-2.6.39-ck1/include/linux/sched.h      2011-05-19 19:36:35.551273667 +1000
@@ -1590,6 +1590,7 @@ static inline int iso_task(struct task_s
 {
        return (p->policy == SCHED_ISO);
 }
+extern int above_background_load(void);
 #else /* CFS */
 extern int runqueue_is_locked(int cpu);
 extern void task_rq_unlock_wait(struct task_struct *p);
@@ -1620,6 +1621,12 @@ static inline int iso_task(struct task_s
 {
        return 0;
 }
+
+/* Anyone feel like implementing this? */
+static inline int above_background_load(void)
+{
+       return 1;
+}
 #endif
 
 /*
Index: linux-2.6.39-ck1/kernel/sched_bfs.c
===================================================================
--- linux-2.6.39-ck1.orig/kernel/sched_bfs.c    2011-05-19 19:36:35.121273667 +1000
+++ linux-2.6.39-ck1/kernel/sched_bfs.c 2011-05-19 19:36:35.553273667 +1000
@@ -563,6 +563,26 @@ static inline void __task_grq_unlock(voi
        grq_unlock();
 }
 
+/*
+ * Look for any tasks *anywhere* that are running nice 0 or better. We do
+ * this lockless for overhead reasons since the occasional wrong result
+ * is harmless.
+ */
+int above_background_load(void)
+{
+       struct task_struct *cpu_curr;
+       unsigned long cpu;
+
+       for_each_online_cpu(cpu) {
+               cpu_curr = cpu_rq(cpu)->curr;
+               if (unlikely(!cpu_curr))
+                       continue;
+               if (PRIO_TO_NICE(cpu_curr->static_prio) < 1)
+                       return 1;
+       }
+       return 0;
+}
+
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {

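Usage sketch: the function is meant to be called from other subsystems (for
example the VM) to decide whether purely background work should run right now.
The minimal caller below is hypothetical; prefetch_suitable() and its placement
are illustrative only, and nothing in it except the call to
above_background_load() comes from this patch.

#include <linux/sched.h>

/* Hypothetical helper in a swap-prefetch/readahead style path. */
static int prefetch_suitable(void)
{
	/*
	 * Back off whenever any CPU is running a task at nice 0 or
	 * better; the lockless check may occasionally give a wrong
	 * answer, which is acceptable for opportunistic background work.
	 */
	if (above_background_load())
		return 0;

	return 1;
}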