--- linux-2.6.28/kernel/sched_bfs.c	2011-06-17 23:09:25.884488799 +0200
+++ linux-2.6.28.new/kernel/sched_bfs.c	2011-06-17 23:15:51.483825482 +0200
@@ -2459,7 +2459,7 @@ need_resched_nonpreemptible:
if (unlikely(reacquire_kernel_lock(current) < 0))
goto need_resched_nonpreemptible;
preempt_enable_no_resched();
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
EXPORT_SYMBOL(schedule);
@@ -2491,7 +2491,7 @@ asmlinkage void __sched preempt_schedule
* between schedule and now.
-	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+	} while (need_resched());
EXPORT_SYMBOL(preempt_schedule);
@@ -2520,7 +2520,7 @@ asmlinkage void __sched preempt_schedule
* between schedule and now.
-	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+	} while (need_resched());
#endif /* CONFIG_PREEMPT */
@@ -3489,6 +3489,11 @@ asmlinkage long sys_sched_yield(void)
+static inline int should_resched(void)
+	return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
static void __cond_resched(void)
/* NOT a real fix but will make voluntary preempt work. 馬鹿な事 */
@@ -3511,8 +3516,7 @@ static void __cond_resched(void)
int __sched _cond_resched(void)
-	if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
-	    system_state == SYSTEM_RUNNING) {
+	if (should_resched()) {
@@ -3530,12 +3534,12 @@ EXPORT_SYMBOL(_cond_resched);
int cond_resched_lock(spinlock_t *lock)
-	int resched = need_resched() && system_state == SYSTEM_RUNNING;
+	int resched = should_resched();
if (spin_needbreak(lock) || resched) {
-	if (resched && need_resched())
@@ -3550,7 +3554,7 @@ int __sched cond_resched_softirq(void)
BUG_ON(!in_softirq());
-	if (need_resched() && system_state == SYSTEM_RUNNING) {
+	if (should_resched()) {
@@ -3919,7 +3923,7 @@ void wake_up_idle_cpu(int cpu)
* lockless. The worst case is that the other CPU runs the
* idle task through an additional NOOP schedule()
-	set_tsk_thread_flag(idle, TIF_NEED_RESCHED);
+	set_tsk_need_resched(idle);
/* NEED_RESCHED must be visible before we test polling */