Clean up patch dir; synchronize patches with kernel-power v48
[kernel-bfs] kernel-bfs-2.6.28/debian/patches/bfs/bfs-401-to-404.patch
diff -uprN linux-2.6.32.orig/include/linux/sched.h linux-2.6.32/include/linux/sched.h
--- linux-2.6.32.orig/include/linux/sched.h     2011-05-25 13:48:40.297372407 +0000
+++ linux-2.6.32/include/linux/sched.h  2011-05-25 13:48:25.742827982 +0000
@@ -1587,7 +1587,7 @@ static inline void tsk_cpus_current(stru
 
 static inline void print_scheduler_version(void)
 {
-       printk(KERN_INFO"BFS CPU scheduler v0.401 by Con Kolivas.\n");
+       printk(KERN_INFO"BFS CPU scheduler v0.404 by Con Kolivas.\n");
 }
 
 static inline int iso_task(struct task_struct *p)
diff -uprN linux-2.6.32.orig/kernel/sched_bfs.c linux-2.6.32/kernel/sched_bfs.c
--- linux-2.6.32.orig/kernel/sched_bfs.c        2011-05-25 14:16:03.817210575 +0000
+++ linux-2.6.32/kernel/sched_bfs.c     2011-05-25 13:48:25.798822382 +0000
@@ -205,6 +205,7 @@ struct rq {
        u64 timekeep_clock;
        unsigned long user_pc, nice_pc, irq_pc, softirq_pc, system_pc,
                iowait_pc, idle_pc;
+       long account_pc;
        atomic_t nr_iowait;
 
 #ifdef CONFIG_SMP
@@ -1020,8 +1021,8 @@ swap_sticky(struct rq *rq, unsigned long
                        p->sticky = 1;
                        return;
                }
-               if (rq->sticky_task->sticky) {
-                       rq->sticky_task->sticky = 0;
+               if (task_sticky(rq->sticky_task)) {
+                       clear_sticky(rq->sticky_task);
                        resched_closest_idle(rq, cpu, rq->sticky_task);
                }
        }
@@ -1366,8 +1367,7 @@ EXPORT_SYMBOL(kthread_bind);
  * prio PRIO_LIMIT so it is always preempted.
  */
 static inline int
-can_preempt(struct task_struct *p, int prio, u64 deadline,
-           unsigned int policy)
+can_preempt(struct task_struct *p, int prio, u64 deadline)
 {
        /* Better static priority RT task or better policy preemption */
        if (p->prio < prio)
@@ -1463,8 +1463,7 @@ static void try_preempt(struct task_stru
                }
        }
 
-       if (!can_preempt(p, highest_prio, highest_prio_rq->rq_deadline,
-           highest_prio_rq->rq_policy))
+       if (!can_preempt(p, highest_prio, highest_prio_rq->rq_deadline))
                return;
 
        resched_task(highest_prio_rq->curr);
@@ -1479,8 +1478,7 @@ static void try_preempt(struct task_stru
 {
        if (p->policy == SCHED_IDLEPRIO)
                return;
-       if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline,
-           uprq->rq_policy))
+       if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline))
                resched_task(uprq->curr);
 }
 #endif /* CONFIG_SMP */
@@ -1964,7 +1962,7 @@ unsigned long long nr_context_switches(v
        /* This is of course impossible */
        if (unlikely(ns < 0))
                ns = 1;
-       return (long long)ns;
+       return (unsigned long long)ns;
 }
 
 unsigned long nr_iowait(void)
@@ -2134,13 +2132,13 @@ static void pc_idle_time(struct rq *rq,
        if (atomic_read(&rq->nr_iowait) > 0) {
                rq->iowait_pc += pc;
                if (rq->iowait_pc >= 100) {
-                       rq->iowait_pc %= 100;
+                       rq->iowait_pc -= 100;
                        cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
                }
        } else {
                rq->idle_pc += pc;
                if (rq->idle_pc >= 100) {
-                       rq->idle_pc %= 100;
+                       rq->idle_pc -= 100;
                        cpustat->idle = cputime64_add(cpustat->idle, tmp);
                }
        }
@@ -2167,19 +2165,19 @@ pc_system_time(struct rq *rq, struct tas
        if (hardirq_count() - hardirq_offset) {
                rq->irq_pc += pc;
                if (rq->irq_pc >= 100) {
-                       rq->irq_pc %= 100;
+                       rq->irq_pc -= 100;
                        cpustat->irq = cputime64_add(cpustat->irq, tmp);
                }
        } else if (softirq_count()) {
                rq->softirq_pc += pc;
                if (rq->softirq_pc >= 100) {
-                       rq->softirq_pc %= 100;
+                       rq->softirq_pc -= 100;
                        cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
                }
        } else {
                rq->system_pc += pc;
                if (rq->system_pc >= 100) {
-                       rq->system_pc %= 100;
+                       rq->system_pc -= 100;
                        cpustat->system = cputime64_add(cpustat->system, tmp);
                }
        }
@@ -2241,6 +2239,16 @@ update_cpu_clock(struct rq *rq, struct t
                int user_tick = user_mode(get_irq_regs());
 
                /* Accurate tick timekeeping */
+               rq->account_pc += account_pc - 100;
+               if (rq->account_pc < 0) {
+                       /*
+                        * Small errors in micro accounting may not make the
+                        * accounting add up to 100% each tick so we keep track
+                        * of the percentage and round it up when less than 100
+                        */
+                       account_pc += -rq->account_pc;
+                       rq->account_pc = 0;
+               }
                if (user_tick)
                        pc_user_time(rq, p, account_pc, account_ns);
                else if (p != idle || (irq_count() != HARDIRQ_OFFSET))
@@ -2250,6 +2258,7 @@ update_cpu_clock(struct rq *rq, struct t
                        pc_idle_time(rq, account_pc);
        } else {
                /* Accurate subtick timekeeping */
+               rq->account_pc += account_pc;
                if (p == idle)
                        pc_idle_time(rq, account_pc);
                else
@@ -2745,15 +2754,25 @@ retry:
        if (idx >= PRIO_LIMIT)
                goto out;
        queue = grq.queue + idx;
+
+       if (idx < MAX_RT_PRIO) {
+               /* We found an rt task */
+               list_for_each_entry(p, queue, run_list) {
+                       /* Make sure cpu affinity is ok */
+                       if (needs_other_cpu(p, cpu))
+                               continue;
+                       edt = p;
+                       goto out_take;
+               }
+               /* None of the RT tasks at this priority can run on this cpu */
+               ++idx;
+               goto retry;
+       }
+
        list_for_each_entry(p, queue, run_list) {
                /* Make sure cpu affinity is ok */
                if (needs_other_cpu(p, cpu))
                        continue;
-               if (idx < MAX_RT_PRIO) {
-                       /* We found an rt task */
-                       edt = p;
-                       goto out_take;
-               }
 
                /*
                 * Soft affinity happens here by not scheduling a task with
@@ -4194,11 +4213,10 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t
 SYSCALL_DEFINE0(sched_yield)
 {
        struct task_struct *p;
-       struct rq *rq;
 
        p = current;
-       rq = task_grq_lock_irq(p);
-       schedstat_inc(rq, yld_count);
+       grq_lock_irq();
+       schedstat_inc(task_rq(p), yld_count);
        requeue_task(p);
 
        /*