diff -uprN linux-2.6.32.orig/include/linux/sched.h linux-2.6.32/include/linux/sched.h
--- linux-2.6.32.orig/include/linux/sched.h	2011-05-25 13:48:40.297372407 +0000
+++ linux-2.6.32/include/linux/sched.h	2011-05-25 13:48:25.742827982 +0000
@@ -1587,7 +1587,7 @@ static inline void tsk_cpus_current(stru
 
 static inline void print_scheduler_version(void)
 {
-	printk(KERN_INFO"BFS CPU scheduler v0.401 by Con Kolivas.\n");
+	printk(KERN_INFO"BFS CPU scheduler v0.404 by Con Kolivas.\n");
 }
 
 static inline int iso_task(struct task_struct *p)
diff -uprN linux-2.6.32.orig/kernel/sched_bfs.c linux-2.6.32/kernel/sched_bfs.c
--- linux-2.6.32.orig/kernel/sched_bfs.c	2011-05-25 14:16:03.817210575 +0000
+++ linux-2.6.32/kernel/sched_bfs.c	2011-05-25 13:48:25.798822382 +0000
@@ -205,6 +205,7 @@ struct rq {
 	unsigned long user_pc, nice_pc, irq_pc, softirq_pc, system_pc,
@@ -1020,8 +1021,8 @@ swap_sticky(struct rq *rq, unsigned long
-		if (rq->sticky_task->sticky) {
-			rq->sticky_task->sticky = 0;
+		if (task_sticky(rq->sticky_task)) {
+			clear_sticky(rq->sticky_task);
 			resched_closest_idle(rq, cpu, rq->sticky_task);
@@ -1366,8 +1367,7 @@ EXPORT_SYMBOL(kthread_bind);
  * prio PRIO_LIMIT so it is always preempted.
-can_preempt(struct task_struct *p, int prio, u64 deadline,
-	    unsigned int policy)
+can_preempt(struct task_struct *p, int prio, u64 deadline)
 	/* Better static priority RT task or better policy preemption */
@@ -1463,8 +1463,7 @@ static void try_preempt(struct task_stru
-	if (!can_preempt(p, highest_prio, highest_prio_rq->rq_deadline,
-	    highest_prio_rq->rq_policy))
+	if (!can_preempt(p, highest_prio, highest_prio_rq->rq_deadline))
 	resched_task(highest_prio_rq->curr);
@@ -1479,8 +1478,7 @@ static void try_preempt(struct task_stru
 	if (p->policy == SCHED_IDLEPRIO)
-	if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline,
+	if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline))
 		resched_task(uprq->curr);
 #endif /* CONFIG_SMP */
@@ -1964,7 +1962,7 @@ unsigned long long nr_context_switches(v
 	/* This is of course impossible */
-	return (long long)ns;
+	return (unsigned long long)ns;
 unsigned long nr_iowait(void)
@@ -2134,13 +2132,13 @@ static void pc_idle_time(struct rq *rq,
 	if (atomic_read(&rq->nr_iowait) > 0) {
 		if (rq->iowait_pc >= 100) {
-			rq->iowait_pc %= 100;
+			rq->iowait_pc -= 100;
 			cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
 		if (rq->idle_pc >= 100) {
 			cpustat->idle = cputime64_add(cpustat->idle, tmp);
@@ -2167,19 +2165,19 @@ pc_system_time(struct rq *rq, struct tas
 	if (hardirq_count() - hardirq_offset) {
 		if (rq->irq_pc >= 100) {
 			cpustat->irq = cputime64_add(cpustat->irq, tmp);
 	} else if (softirq_count()) {
 		if (rq->softirq_pc >= 100) {
-			rq->softirq_pc %= 100;
+			rq->softirq_pc -= 100;
 			cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
 		if (rq->system_pc >= 100) {
-			rq->system_pc %= 100;
+			rq->system_pc -= 100;
 			cpustat->system = cputime64_add(cpustat->system, tmp);
@@ -2241,6 +2239,16 @@ update_cpu_clock(struct rq *rq, struct t
 		int user_tick = user_mode(get_irq_regs());
 
 		/* Accurate tick timekeeping */
+		rq->account_pc += account_pc - 100;
+		if (rq->account_pc < 0) {
+			/*
+			 * Small errors in micro accounting may not make the
+			 * accounting add up to 100% each tick so we keep track
+			 * of the percentage and round it up when less than 100
+			 */
+			account_pc += -rq->account_pc;
+			rq->account_pc = 0;
+		}
 		if (user_tick)
 			pc_user_time(rq, p, account_pc, account_ns);
 		else if (p != idle || (irq_count() != HARDIRQ_OFFSET))
@@ -2250,6 +2258,7 @@ update_cpu_clock(struct rq *rq, struct t
 			pc_idle_time(rq, account_pc);
 	} else {
 		/* Accurate subtick timekeeping */
+		rq->account_pc += account_pc;
 		if (p == idle)
 			pc_idle_time(rq, account_pc);
 		else
@@ -2745,15 +2754,25 @@ retry:
 	if (idx >= PRIO_LIMIT)
 	queue = grq.queue + idx;
+	if (idx < MAX_RT_PRIO) {
+		/* We found an rt task */
+		list_for_each_entry(p, queue, run_list) {
+			/* Make sure cpu affinity is ok */
+			if (needs_other_cpu(p, cpu))
+		/* None of the RT tasks at this priority can run on this cpu */
 	list_for_each_entry(p, queue, run_list) {
 		/* Make sure cpu affinity is ok */
 		if (needs_other_cpu(p, cpu))
-		if (idx < MAX_RT_PRIO) {
-			/* We found an rt task */
 		 * Soft affinity happens here by not scheduling a task with
@@ -4194,11 +4213,10 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t
 SYSCALL_DEFINE0(sched_yield)
 	struct task_struct *p;
-	rq = task_grq_lock_irq(p);
-	schedstat_inc(rq, yld_count);
+	schedstat_inc(task_rq(p), yld_count);