 -	if (unlikely(reacquire_kernel_lock(current) < 0))
 +rerun_prev_unlocked:
 +	if (unlikely(reacquire_kernel_lock(current) < 0)) {
-+		prev = rq->curr;
-+		switch_count = &prev->nivcsw;
++//		prev = rq->curr;
++//		switch_count = &prev->nivcsw;
  		goto need_resched_nonpreemptible;
 +	}
 + 
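
The hunk above turns the lock-reacquire fallthrough at the end of schedule() into a braced block behind the new rerun_prev_unlocked label, and stops rewinding prev/switch_count on that path (the old assignments survive only as // comments). A sketch of how the tail of schedule() reads once the updated patch is applied, reconstructed from this hunk alone and assuming the surrounding BFS code is unchanged:

	/* Sketch only: reconstructed from the hunk above; the rest of
	 * schedule() is assumed, not shown in this excerpt. */
rerun_prev_unlocked:
	if (unlikely(reacquire_kernel_lock(current) < 0)) {
//		prev = rq->curr;
//		switch_count = &prev->nivcsw;
		goto need_resched_nonpreemptible;
	}
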
p->sched_reset_on_fork = reset_on_fork;
queued = task_queued(p);
-@@ -4453,7 +4564,7 @@ migration_call(struct notifier_block *nf
+@@ -4444,7 +4556,6 @@ migration_call(struct notifier_block *nf
+ 
+ 	case CPU_DEAD:
+ 	case CPU_DEAD_FROZEN:
+-		cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
+ 		idle = rq->idle;
+ 		/* Idle task back to normal (off runqueue, low prio) */
+ 		grq_lock_irq();
+@@ -4453,9 +4564,8 @@ migration_call(struct notifier_block *nf
  		__setscheduler(idle, rq, SCHED_NORMAL, 0);
  		idle->prio = PRIO_LIMIT;
  		set_rq_task(rq, idle);
 -		update_rq_clock(rq);
 +		update_clocks(rq);
  		grq_unlock_irq();
- 		cpuset_unlock();
+-		cpuset_unlock();
  		break;
+ 
+ 	case CPU_DYING:
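
Both hunks above touch the CPU_DEAD branch of migration_call(): the updated patch drops the cpuset_lock()/cpuset_unlock() pair around the idle-task reset and keeps the earlier update_rq_clock() -> update_clocks() substitution. Reassembled from the two hunks (the lines elided between them are assumed unchanged), the resulting notifier branch reads roughly:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		idle = rq->idle;
		/* Idle task back to normal (off runqueue, low prio) */
		grq_lock_irq();
		/* ... intermediate lines not shown in this excerpt ... */
		__setscheduler(idle, rq, SCHED_NORMAL, 0);
		idle->prio = PRIO_LIMIT;
		set_rq_task(rq, idle);
		update_clocks(rq);
		grq_unlock_irq();
		break;
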
@@ -5982,12 +6093,14 @@ void __init sched_init(void)
int i;
struct rq *rq;
rq->sd = NULL;
rq->rd = NULL;
rq->online = 0;
+@@ -6219,10 +6334,6 @@ cputime_t task_stime(struct task_struct
+ }
+ #endif
+ 
+-void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+-{
+-}
+-
+ inline cputime_t task_gtime(struct task_struct *p)
+ {
+ 	return p->gtime;
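
This added hunk removes the empty thread_group_times() stub, so the cputime section now goes straight from task_stime() to task_gtime(). As the hunk itself shows, the surviving accessor is a one-line read of the task's guest time:

inline cputime_t task_gtime(struct task_struct *p)
{
	return p->gtime;
}
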
Index: kernel-2.6.28/kernel/sysctl.c
===================================================================
--- kernel-2.6.28.orig/kernel/sysctl.c