--- /dev/null
+diff -uprN linux-2.6.32.orig/include/linux/sched.h linux-2.6.32/include/linux/sched.h
+--- linux-2.6.32.orig/include/linux/sched.h	2011-05-25 13:48:40.297372407 +0000
++++ linux-2.6.32/include/linux/sched.h	2011-05-25 13:48:25.742827982 +0000
+@@ -1587,7 +1587,7 @@ static inline void tsk_cpus_current(stru
+
+ static inline void print_scheduler_version(void)
+ {
+-	printk(KERN_INFO"BFS CPU scheduler v0.401 by Con Kolivas.\n");
++	printk(KERN_INFO"BFS CPU scheduler v0.404 by Con Kolivas.\n");
+ }
+
+ static inline int iso_task(struct task_struct *p)
+diff -uprN linux-2.6.32.orig/kernel/sched_bfs.c linux-2.6.32/kernel/sched_bfs.c
+--- linux-2.6.32.orig/kernel/sched_bfs.c	2011-05-25 14:16:03.817210575 +0000
++++ linux-2.6.32/kernel/sched_bfs.c	2011-05-25 13:48:25.798822382 +0000
+@@ -205,6 +205,7 @@ struct rq {
+ 	u64 timekeep_clock;
+ 	unsigned long user_pc, nice_pc, irq_pc, softirq_pc, system_pc,
+ 		iowait_pc, idle_pc;
++	long account_pc;
+ 	atomic_t nr_iowait;
+
+ #ifdef CONFIG_SMP
+@@ -1020,8 +1021,8 @@ swap_sticky(struct rq *rq, unsigned long
+ 			p->sticky = 1;
+ 			return;
+ 		}
+-		if (rq->sticky_task->sticky) {
+-			rq->sticky_task->sticky = 0;
++		if (task_sticky(rq->sticky_task)) {
++			clear_sticky(rq->sticky_task);
+ 			resched_closest_idle(rq, cpu, rq->sticky_task);
+ 		}
+ 	}
+@@ -1366,8 +1367,7 @@ EXPORT_SYMBOL(kthread_bind);
+  * prio PRIO_LIMIT so it is always preempted.
+  */
+ static inline int
+-can_preempt(struct task_struct *p, int prio, u64 deadline,
+-	    unsigned int policy)
++can_preempt(struct task_struct *p, int prio, u64 deadline)
+ {
+ 	/* Better static priority RT task or better policy preemption */
+ 	if (p->prio < prio)
+@@ -1463,8 +1463,7 @@ static void try_preempt(struct task_stru
+ 		}
+ 	}
+ 
+-	if (!can_preempt(p, highest_prio, highest_prio_rq->rq_deadline,
+-	    highest_prio_rq->rq_policy))
++	if (!can_preempt(p, highest_prio, highest_prio_rq->rq_deadline))
+ 		return;
+ 
+ 	resched_task(highest_prio_rq->curr);
+@@ -1479,8 +1478,7 @@ static void try_preempt(struct task_stru
+ {
+ 	if (p->policy == SCHED_IDLEPRIO)
+ 		return;
+-	if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline,
+-	    uprq->rq_policy))
++	if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline))
+ 		resched_task(uprq->curr);
+ }
+ #endif /* CONFIG_SMP */
+@@ -1964,7 +1962,7 @@ unsigned long long nr_context_switches(v
+ 	/* This is of course impossible */
+ 	if (unlikely(ns < 0))
+ 		ns = 1;
+-	return (long long)ns;
++	return (unsigned long long)ns;
+ }
+
+ unsigned long nr_iowait(void)
+@@ -2134,13 +2132,13 @@ static void pc_idle_time(struct rq *rq,
+ 	if (atomic_read(&rq->nr_iowait) > 0) {
+ 		rq->iowait_pc += pc;
+ 		if (rq->iowait_pc >= 100) {
+-			rq->iowait_pc %= 100;
++			rq->iowait_pc -= 100;
+ 			cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
+ 		}
+ 	} else {
+ 		rq->idle_pc += pc;
+ 		if (rq->idle_pc >= 100) {
+-			rq->idle_pc %= 100;
++			rq->idle_pc -= 100;
+ 			cpustat->idle = cputime64_add(cpustat->idle, tmp);
+ 		}
+ 	}
+@@ -2167,19 +2165,19 @@ pc_system_time(struct rq *rq, struct tas
+ 	if (hardirq_count() - hardirq_offset) {
+ 		rq->irq_pc += pc;
+ 		if (rq->irq_pc >= 100) {
+-			rq->irq_pc %= 100;
++			rq->irq_pc -= 100;
+ 			cpustat->irq = cputime64_add(cpustat->irq, tmp);
+ 		}
+ 	} else if (softirq_count()) {
+ 		rq->softirq_pc += pc;
+ 		if (rq->softirq_pc >= 100) {
+-			rq->softirq_pc %= 100;
++			rq->softirq_pc -= 100;
+ 			cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+ 		}
+ 	} else {
+ 		rq->system_pc += pc;
+ 		if (rq->system_pc >= 100) {
+-			rq->system_pc %= 100;
++			rq->system_pc -= 100;
+ 			cpustat->system = cputime64_add(cpustat->system, tmp);
+ 		}
+ 	}
+@@ -2241,6 +2239,16 @@ update_cpu_clock(struct rq *rq, struct t
+ 		int user_tick = user_mode(get_irq_regs());
+ 
+ 		/* Accurate tick timekeeping */
++		rq->account_pc += account_pc - 100;
++		if (rq->account_pc < 0) {
++			/*
++			 * Small errors in micro accounting may not make the
++			 * accounting add up to 100% each tick so we keep track
++			 * of the percentage and round it up when less than 100
++			 */
++			account_pc += -rq->account_pc;
++			rq->account_pc = 0;
++		}
+ 		if (user_tick)
+ 			pc_user_time(rq, p, account_pc, account_ns);
+ 		else if (p != idle || (irq_count() != HARDIRQ_OFFSET))
+@@ -2250,6 +2258,7 @@ update_cpu_clock(struct rq *rq, struct t
+ 			pc_idle_time(rq, account_pc);
+ 	} else {
+ 		/* Accurate subtick timekeeping */
++		rq->account_pc += account_pc;
+ 		if (p == idle)
+ 			pc_idle_time(rq, account_pc);
+ 		else
+@@ -2745,15 +2754,25 @@ retry:
+ 	if (idx >= PRIO_LIMIT)
+ 		goto out;
+ 	queue = grq.queue + idx;
++
++	if (idx < MAX_RT_PRIO) {
++		/* We found an rt task */
++		list_for_each_entry(p, queue, run_list) {
++			/* Make sure cpu affinity is ok */
++			if (needs_other_cpu(p, cpu))
++				continue;
++			edt = p;
++			goto out_take;
++		}
++		/* None of the RT tasks at this priority can run on this cpu */
++		++idx;
++		goto retry;
++	}
++
+ 	list_for_each_entry(p, queue, run_list) {
+ 		/* Make sure cpu affinity is ok */
+ 		if (needs_other_cpu(p, cpu))
+ 			continue;
+-		if (idx < MAX_RT_PRIO) {
+-			/* We found an rt task */
+-			edt = p;
+-			goto out_take;
+-		}
+ 
+ 		/*
+ 		 * Soft affinity happens here by not scheduling a task with
+@@ -4194,11 +4213,10 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t
+ SYSCALL_DEFINE0(sched_yield)
+ {
+ 	struct task_struct *p;
+-	struct rq *rq;
+ 
+ 	p = current;
+-	rq = task_grq_lock_irq(p);
+-	schedstat_inc(rq, yld_count);
++	grq_lock_irq();
++	schedstat_inc(task_rq(p), yld_count);
+ 	requeue_task(p);
+ 
+ 	/*