BFS prerequisites and tidy-up of resched functions
[kernel-bfs] kernel-bfs-2.6.28/debian/patches/bfs/bfs-tidy_up_resched.patch
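Tidy-up of the reschedule paths in kernel/sched_bfs.c: open-coded test_thread_flag(TIF_NEED_RESCHED) checks in schedule() and the preempt_schedule loops become need_resched(), wake_up_idle_cpu() uses set_tsk_need_resched() instead of setting the flag by hand, and the condition shared by the cond_resched() family is factored into a new should_resched() helper.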
--- linux-2.6.28/kernel/sched_bfs.c     2011-06-17 23:09:25.884488799 +0200
+++ linux-2.6.28.new/kernel/sched_bfs.c 2011-06-17 23:15:51.483825482 +0200
@@ -2459,7 +2459,7 @@ need_resched_nonpreemptible:
        if (unlikely(reacquire_kernel_lock(current) < 0))
                goto need_resched_nonpreemptible;
        preempt_enable_no_resched();
-       if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+       if (need_resched())
                goto need_resched;
 }
 EXPORT_SYMBOL(schedule);
@@ -2491,7 +2491,7 @@ asmlinkage void __sched preempt_schedule
                 * between schedule and now.
                 */
                barrier();
-       } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+       } while (need_resched());
 }
 EXPORT_SYMBOL(preempt_schedule);
 
@@ -2520,7 +2520,7 @@ asmlinkage void __sched preempt_schedule
                 * between schedule and now.
                 */
                barrier();
-       } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+       } while (need_resched());
 }
 
 #endif /* CONFIG_PREEMPT */
@@ -3489,6 +3489,11 @@ asmlinkage long sys_sched_yield(void)
        return 0;
 }
 
+static inline int should_resched(void)
+{
+       return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
+}
+
 static void __cond_resched(void)
 {
        /* NOT a real fix but will make voluntary preempt work. 馬鹿な事 */
@@ -3511,8 +3516,7 @@ static void __cond_resched(void)
 
 int __sched _cond_resched(void)
 {
-       if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
-                                       system_state == SYSTEM_RUNNING) {
+       if (should_resched()) {
                __cond_resched();
                return 1;
        }
@@ -3530,12 +3534,12 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int cond_resched_lock(spinlock_t *lock)
 {
-       int resched = need_resched() && system_state == SYSTEM_RUNNING;
+       int resched = should_resched();
        int ret = 0;
 
        if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
-               if (resched && need_resched())
+               if (resched)
                        __cond_resched();
                else
                        cpu_relax();
@@ -3550,7 +3554,7 @@ int __sched cond_resched_softirq(void)
 {
        BUG_ON(!in_softirq());
 
-       if (need_resched() && system_state == SYSTEM_RUNNING) {
+       if (should_resched()) {
                local_bh_enable();
                __cond_resched();
                local_bh_disable();
@@ -3919,7 +3923,7 @@ void wake_up_idle_cpu(int cpu)
         * lockless. The worst case is that the other CPU runs the
         * idle task through an additional NOOP schedule()
         */
-       set_tsk_thread_flag(idle, TIF_NEED_RESCHED);
+       set_tsk_need_resched(idle);
 
        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
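
For reference, the helpers this patch switches to are thin wrappers around the open-coded thread-flag operations they replace, so the schedule(), preempt_schedule*() and wake_up_idle_cpu() hunks do not change behaviour. A rough sketch of how these wrappers look in include/linux/sched.h of a 2.6.28-era tree (paraphrased from memory, not quoted verbatim from the source):

/* Paraphrased sketch of the existing helpers in include/linux/sched.h;
 * see the actual tree for the authoritative definitions. */
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
        set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int need_resched(void)
{
        return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

The cond_resched() family does change slightly: the new should_resched() helper adds the PREEMPT_ACTIVE check to cond_resched_lock() and cond_resched_softirq(), and drops the system_state == SYSTEM_RUNNING test everywhere, so these voluntary preemption points can also fire before the system is fully up.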