Update the voltage scaling patch so it compiles together with the other patches being enabled (convert headers to quilt Index: style and refresh hunk offsets).
authorCorey O'Connor <coreyoconnor@gmail.com>
Thu, 2 Sep 2010 20:47:42 +0000 (13:47 -0700)
committerCorey O'Connor <coreyoconnor@gmail.com>
Thu, 2 Sep 2010 20:47:42 +0000 (13:47 -0700)
kernel-power-2.6.28/debian/patches/armthumb.diff
kernel-power-2.6.28/debian/patches/bfs.patch
kernel-power-2.6.28/debian/patches/series
kernel-power-2.6.28/debian/patches/voltage_scaling_0.diff

index 2588b0a..32b3055 100644 (file)
@@ -1,6 +1,8 @@
---- kernel-power-2.6.28.orig/arch/arm/Kconfig
-+++ kernel-power-2.6.28/arch/arm/Kconfig
-@@ -722,6 +722,22 @@
+Index: kernel-2.6.28/arch/arm/Kconfig
+===================================================================
+--- kernel-2.6.28.orig/arch/arm/Kconfig
++++ kernel-2.6.28/arch/arm/Kconfig
+@@ -722,6 +722,22 @@ source "drivers/pci/Kconfig"
  
  source "drivers/pcmcia/Kconfig"
  
  endmenu
  
  menu "Kernel Features"
---- kernel-power-2.6.28.orig/arch/arm/mm/proc-v7.S
-+++ kernel-power-2.6.28/arch/arm/mm/proc-v7.S
-@@ -90,7 +90,10 @@
- #ifdef CONFIG_MMU
+Index: kernel-2.6.28/arch/arm/mm/proc-v7.S
+===================================================================
+--- kernel-2.6.28.orig/arch/arm/mm/proc-v7.S
++++ kernel-2.6.28/arch/arm/mm/proc-v7.S
+@@ -91,6 +91,9 @@ ENTRY(cpu_v7_switch_mm)
        mov     r2, #0
        ldr     r1, [r1, #MM_CONTEXT_ID]        @ get mm->context.id
--      orr     r0, r0, #TTB_RGN_OC_WB          @ mark PTWs outer cacheable, WB
-+      orr     r0, r0, #TTB_RGN_OC_WB          @ mark PTWs outer cacheable, WB
+       orr     r0, r0, #TTB_RGN_OC_WB          @ mark PTWs outer cacheable, WB
 +#ifdef CONFIG_ARM_ERRATA_430973
 +      mcr     p15, 0, r2, c7, c5, 6           @ flush BTAC/BTB
 +#endif
        mcr     p15, 0, r2, c13, c0, 1          @ set reserved context ID
        isb
  1:    mcr     p15, 0, r0, c2, c0, 0           @ set TTB 0
-@@ -171,6 +174,11 @@
+@@ -171,6 +174,11 @@ __v7_setup:
        stmia   r12, {r0-r5, r7, r9, r11, lr}
        bl      v7_flush_dcache_all
        ldmia   r12, {r0-r5, r7, r9, r11, lr}
index 85f94bf..4e2ac44 100644 (file)
@@ -1,8 +1,7 @@
-diff --git a/Documentation/scheduler/sched-BFS.txt b/Documentation/scheduler/sched-BFS.txt
-new file mode 100644
-index 0000000..e704693
+Index: kernel-2.6.28/Documentation/scheduler/sched-BFS.txt
+===================================================================
 --- /dev/null
-+++ b/Documentation/scheduler/sched-BFS.txt
++++ kernel-2.6.28/Documentation/scheduler/sched-BFS.txt
 @@ -0,0 +1,359 @@
 +***************
 +*** 0 ****
@@ -363,10 +362,10 @@ index 0000000..e704693
 ++ 
 ++ 
 ++ Con Kolivas <kernel@kolivas.org> Thu Dec 3 2009
-diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
-index 2ad914c..f6da979 100644
---- a/arch/powerpc/platforms/cell/spufs/sched.c
-+++ b/arch/powerpc/platforms/cell/spufs/sched.c
+Index: kernel-2.6.28/arch/powerpc/platforms/cell/spufs/sched.c
+===================================================================
+--- kernel-2.6.28.orig/arch/powerpc/platforms/cell/spufs/sched.c
++++ kernel-2.6.28/arch/powerpc/platforms/cell/spufs/sched.c
 @@ -62,11 +62,6 @@ static struct timer_list spusched_timer;
  static struct timer_list spuloadavg_timer;
  
@@ -379,11 +378,11 @@ index 2ad914c..f6da979 100644
   * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
   * tick for every 10 CPU scheduler ticks.
   */
-diff --git a/fs/proc/base.c b/fs/proc/base.c
-index d467760..8f7ccde 100644
---- a/fs/proc/base.c
-+++ b/fs/proc/base.c
-@@ -347,7 +347,7 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer)
+Index: kernel-2.6.28/fs/proc/base.c
+===================================================================
+--- kernel-2.6.28.orig/fs/proc/base.c
++++ kernel-2.6.28/fs/proc/base.c
+@@ -336,7 +336,7 @@ static int proc_pid_wchan(struct task_st
  static int proc_pid_schedstat(struct task_struct *task, char *buffer)
  {
        return sprintf(buffer, "%llu %llu %lu\n",
@@ -392,10 +391,10 @@ index d467760..8f7ccde 100644
                        task->sched_info.run_delay,
                        task->sched_info.pcount);
  }
-diff --git a/include/linux/init_task.h b/include/linux/init_task.h
-index 23fd890..85552e9 100644
---- a/include/linux/init_task.h
-+++ b/include/linux/init_task.h
+Index: kernel-2.6.28/include/linux/init_task.h
+===================================================================
+--- kernel-2.6.28.orig/include/linux/init_task.h
++++ kernel-2.6.28/include/linux/init_task.h
 @@ -47,6 +47,11 @@ extern struct files_struct init_files;
        .posix_timers    = LIST_HEAD_INIT(sig.posix_timers),            \
        .cpu_timers     = INIT_CPU_TIMERS(sig.cpu_timers),              \
@@ -485,11 +484,11 @@ index 23fd890..85552e9 100644
  
  #define INIT_CPU_TIMERS(cpu_timers)                                   \
  {                                                                     \
-diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
-index f98a656..b342d9d 100644
---- a/include/linux/ioprio.h
-+++ b/include/linux/ioprio.h
-@@ -64,6 +64,8 @@ static inline int task_ioprio_class(struct io_context *ioc)
+Index: kernel-2.6.28/include/linux/ioprio.h
+===================================================================
+--- kernel-2.6.28.orig/include/linux/ioprio.h
++++ kernel-2.6.28/include/linux/ioprio.h
+@@ -64,6 +64,8 @@ static inline int task_ioprio_class(stru
  
  static inline int task_nice_ioprio(struct task_struct *task)
  {
@@ -498,11 +497,11 @@ index f98a656..b342d9d 100644
        return (task_nice(task) + 20) / 5;
  }
  
-diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
-index 4a145ca..c0c4a92 100644
---- a/include/linux/kernel_stat.h
-+++ b/include/linux/kernel_stat.h
-@@ -67,10 +67,16 @@ static inline unsigned int kstat_irqs(unsigned int irq)
+Index: kernel-2.6.28/include/linux/kernel_stat.h
+===================================================================
+--- kernel-2.6.28.orig/include/linux/kernel_stat.h
++++ kernel-2.6.28/include/linux/kernel_stat.h
+@@ -67,10 +67,16 @@ static inline unsigned int kstat_irqs(un
  }
  
  extern unsigned long long task_delta_exec(struct task_struct *);
@@ -524,10 +523,10 @@ index 4a145ca..c0c4a92 100644
 +extern void account_system_time_scaled(struct task_struct *, cputime_t, cputime_t);
  
  #endif /* _LINUX_KERNEL_STAT_H */
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 3883c32..1b682f2 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
+Index: kernel-2.6.28/include/linux/sched.h
+===================================================================
+--- kernel-2.6.28.orig/include/linux/sched.h
++++ kernel-2.6.28/include/linux/sched.h
 @@ -36,8 +36,14 @@
  #define SCHED_FIFO            1
  #define SCHED_RR              2
@@ -544,7 +543,7 @@ index 3883c32..1b682f2 100644
  
  #ifdef __KERNEL__
  
-@@ -246,7 +252,6 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
+@@ -247,7 +253,6 @@ extern asmlinkage void schedule_tail(str
  extern void init_idle(struct task_struct *idle, int cpu);
  extern void init_idle_bootup_task(struct task_struct *idle);
  
@@ -552,7 +551,7 @@ index 3883c32..1b682f2 100644
  extern void task_rq_unlock_wait(struct task_struct *p);
  
  extern cpumask_t nohz_cpu_mask;
-@@ -455,16 +460,27 @@ struct task_cputime {
+@@ -456,16 +461,27 @@ struct task_cputime {
  #define virt_exp      utime
  #define sched_exp     sum_exec_runtime
  
@@ -586,7 +585,7 @@ index 3883c32..1b682f2 100644
  };
  
  /*
-@@ -513,10 +529,10 @@ struct signal_struct {
+@@ -514,10 +530,10 @@ struct signal_struct {
        cputime_t it_prof_incr, it_virt_incr;
  
        /*
@@ -600,7 +599,7 @@ index 3883c32..1b682f2 100644
  
        /* Earliest-expiration cache. */
        struct task_cputime cputime_expires;
-@@ -553,7 +569,7 @@ struct signal_struct {
+@@ -554,7 +570,7 @@ struct signal_struct {
         * Live threads maintain their own counters and add to these
         * in __exit_signal, except for the group leader.
         */
@@ -609,7 +608,7 @@ index 3883c32..1b682f2 100644
        cputime_t gtime;
        cputime_t cgtime;
        unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
-@@ -562,6 +578,14 @@ struct signal_struct {
+@@ -563,6 +579,14 @@ struct signal_struct {
        struct task_io_accounting ioac;
  
        /*
@@ -624,7 +623,7 @@ index 3883c32..1b682f2 100644
         * We don't bother to synchronize most readers of this at all,
         * because there is no reader checking a limit that actually needs
         * to get both rlim_cur and rlim_max atomically, and either one
-@@ -1080,17 +1104,31 @@ struct task_struct {
+@@ -1081,17 +1105,31 @@ struct task_struct {
  
        int lock_depth;         /* BKL lock depth */
  
@@ -656,7 +655,7 @@ index 3883c32..1b682f2 100644
  
  #ifdef CONFIG_PREEMPT_NOTIFIERS
        /* list of struct preempt_notifier: */
-@@ -1113,6 +1151,9 @@ struct task_struct {
+@@ -1114,6 +1152,9 @@ struct task_struct {
  
        unsigned int policy;
        cpumask_t cpus_allowed;
@@ -666,7 +665,7 @@ index 3883c32..1b682f2 100644
  
  #ifdef CONFIG_PREEMPT_RCU
        int rcu_read_lock_nesting;
-@@ -1173,6 +1214,9 @@ struct task_struct {
+@@ -1174,6 +1215,9 @@ struct task_struct {
        int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
  
        cputime_t utime, stime, utimescaled, stimescaled;
@@ -676,7 +675,7 @@ index 3883c32..1b682f2 100644
        cputime_t gtime;
        cputime_t prev_utime, prev_stime;
        unsigned long nvcsw, nivcsw; /* context switch counts */
-@@ -1357,6 +1401,64 @@ struct task_struct {
+@@ -1358,6 +1402,64 @@ struct task_struct {
        struct list_head        *scm_work_list;
  };
  
@@ -741,7 +740,7 @@ index 3883c32..1b682f2 100644
  /*
   * Priority of a process goes from 0..MAX_PRIO-1, valid RT
   * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
-@@ -1372,9 +1474,19 @@ struct task_struct {
+@@ -1373,9 +1475,19 @@ struct task_struct {
  
  #define MAX_USER_RT_PRIO      100
  #define MAX_RT_PRIO           MAX_USER_RT_PRIO
@@ -763,7 +762,7 @@ index 3883c32..1b682f2 100644
  
  static inline int rt_prio(int prio)
  {
-@@ -1642,7 +1754,7 @@ task_sched_runtime(struct task_struct *task);
+@@ -1643,7 +1755,7 @@ task_sched_runtime(struct task_struct *t
  extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
  
  /* sched_exec is called by processes performing an exec */
@@ -772,7 +771,7 @@ index 3883c32..1b682f2 100644
  extern void sched_exec(void);
  #else
  #define sched_exec()   {}
-@@ -1791,6 +1903,9 @@ extern void wake_up_new_task(struct task_struct *tsk,
+@@ -1792,6 +1904,9 @@ extern void wake_up_new_task(struct task
   static inline void kick_process(struct task_struct *tsk) { }
  #endif
  extern void sched_fork(struct task_struct *p, int clone_flags);
@@ -782,7 +781,7 @@ index 3883c32..1b682f2 100644
  extern void sched_dead(struct task_struct *p);
  
  extern int in_group_p(gid_t);
-@@ -2140,25 +2255,18 @@ static inline int spin_needbreak(spinlock_t *lock)
+@@ -2141,25 +2256,18 @@ static inline int spin_needbreak(spinloc
  /*
   * Thread group CPU time accounting.
   */
@@ -813,10 +812,10 @@ index 3883c32..1b682f2 100644
  }
  
  /*
-diff --git a/init/Kconfig b/init/Kconfig
-index f763762..12b3a4a 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
+Index: kernel-2.6.28/init/Kconfig
+===================================================================
+--- kernel-2.6.28.orig/init/Kconfig
++++ kernel-2.6.28/init/Kconfig
 @@ -18,6 +18,19 @@ config DEFCONFIG_LIST
  
  menu "General setup"
@@ -855,10 +854,10 @@ index f763762..12b3a4a 100644
        help
          Provides a simple Resource Controller for monitoring the
          total CPU consumed by the tasks in a cgroup
-diff --git a/init/main.c b/init/main.c
-index 7e117a2..ea6d26c 100644
---- a/init/main.c
-+++ b/init/main.c
+Index: kernel-2.6.28/init/main.c
+===================================================================
+--- kernel-2.6.28.orig/init/main.c
++++ kernel-2.6.28/init/main.c
 @@ -800,6 +800,9 @@ static int noinline init_post(void)
        system_state = SYSTEM_RUNNING;
        numa_default_policy();
@@ -869,11 +868,11 @@ index 7e117a2..ea6d26c 100644
        if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
                printk(KERN_WARNING "Warning: unable to open an initial console.\n");
  
-diff --git a/kernel/delayacct.c b/kernel/delayacct.c
-index b3179da..cbdc400 100644
---- a/kernel/delayacct.c
-+++ b/kernel/delayacct.c
-@@ -127,7 +127,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
+Index: kernel-2.6.28/kernel/delayacct.c
+===================================================================
+--- kernel-2.6.28.orig/kernel/delayacct.c
++++ kernel-2.6.28/kernel/delayacct.c
+@@ -127,7 +127,7 @@ int __delayacct_add_tsk(struct taskstats
         */
        t1 = tsk->sched_info.pcount;
        t2 = tsk->sched_info.run_delay;
@@ -882,11 +881,11 @@ index b3179da..cbdc400 100644
  
        d->cpu_count += t1;
  
-diff --git a/kernel/exit.c b/kernel/exit.c
-index 2d8be7e..7413c2a 100644
---- a/kernel/exit.c
-+++ b/kernel/exit.c
-@@ -112,6 +112,8 @@ static void __exit_signal(struct task_struct *tsk)
+Index: kernel-2.6.28/kernel/exit.c
+===================================================================
+--- kernel-2.6.28.orig/kernel/exit.c
++++ kernel-2.6.28/kernel/exit.c
+@@ -112,6 +112,8 @@ static void __exit_signal(struct task_st
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
@@ -895,7 +894,7 @@ index 2d8be7e..7413c2a 100644
                sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
-@@ -120,6 +122,7 @@ static void __exit_signal(struct task_struct *tsk)
+@@ -120,6 +122,7 @@ static void __exit_signal(struct task_st
                sig->inblock += task_io_get_inblock(tsk);
                sig->oublock += task_io_get_oublock(tsk);
                task_io_accounting_add(&sig->ioac, &tsk->ioac);
@@ -903,11 +902,11 @@ index 2d8be7e..7413c2a 100644
                sig = NULL; /* Marker for below. */
        }
  
-diff --git a/kernel/fork.c b/kernel/fork.c
-index 495da2e..fe5befb 100644
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -806,14 +806,15 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
+Index: kernel-2.6.28/kernel/fork.c
+===================================================================
+--- kernel-2.6.28.orig/kernel/fork.c
++++ kernel-2.6.28/kernel/fork.c
+@@ -806,14 +806,15 @@ static int copy_signal(unsigned long clo
        int ret;
  
        if (clone_flags & CLONE_THREAD) {
@@ -929,7 +928,7 @@ index 495da2e..fe5befb 100644
        tsk->signal = sig;
        if (!sig)
                return -ENOMEM;
-@@ -843,21 +844,20 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
+@@ -843,21 +844,20 @@ static int copy_signal(unsigned long clo
        sig->tty_old_pgrp = NULL;
        sig->tty = NULL;
  
@@ -953,7 +952,7 @@ index 495da2e..fe5befb 100644
        acct_init_pacct(&sig->pacct);
  
        tty_audit_fork(sig);
-@@ -1211,7 +1211,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1207,7 +1207,7 @@ static struct task_struct *copy_process(
         * parent's CPU). This avoids alot of nasty races.
         */
        p->cpus_allowed = current->cpus_allowed;
@@ -962,11 +961,11 @@ index 495da2e..fe5befb 100644
        if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
                        !cpu_online(task_cpu(p))))
                set_task_cpu(p, smp_processor_id());
-diff --git a/kernel/itimer.c b/kernel/itimer.c
-index db7c358..14294c0 100644
---- a/kernel/itimer.c
-+++ b/kernel/itimer.c
-@@ -62,7 +62,7 @@ int do_getitimer(int which, struct itimerval *value)
+Index: kernel-2.6.28/kernel/itimer.c
+===================================================================
+--- kernel-2.6.28.orig/kernel/itimer.c
++++ kernel-2.6.28/kernel/itimer.c
+@@ -62,7 +62,7 @@ int do_getitimer(int which, struct itime
                        struct task_cputime cputime;
                        cputime_t utime;
  
@@ -975,7 +974,7 @@ index db7c358..14294c0 100644
                        utime = cputime.utime;
                        if (cputime_le(cval, utime)) { /* about to fire */
                                cval = jiffies_to_cputime(1);
-@@ -82,7 +82,7 @@ int do_getitimer(int which, struct itimerval *value)
+@@ -82,7 +82,7 @@ int do_getitimer(int which, struct itime
                        struct task_cputime times;
                        cputime_t ptime;
  
@@ -984,10 +983,10 @@ index db7c358..14294c0 100644
                        ptime = cputime_add(times.utime, times.stime);
                        if (cputime_le(cval, ptime)) { /* about to fire */
                                cval = jiffies_to_cputime(1);
-diff --git a/kernel/kthread.c b/kernel/kthread.c
-index 8e7a7ce..af9eace 100644
---- a/kernel/kthread.c
-+++ b/kernel/kthread.c
+Index: kernel-2.6.28/kernel/kthread.c
+===================================================================
+--- kernel-2.6.28.orig/kernel/kthread.c
++++ kernel-2.6.28/kernel/kthread.c
 @@ -15,7 +15,7 @@
  #include <linux/mutex.h>
  #include <trace/sched.h>
@@ -997,7 +996,7 @@ index 8e7a7ce..af9eace 100644
  
  static DEFINE_SPINLOCK(kthread_create_lock);
  static LIST_HEAD(kthread_create_list);
-@@ -179,7 +179,6 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
+@@ -179,7 +179,6 @@ void kthread_bind(struct task_struct *k,
        }
        set_task_cpu(k, cpu);
        k->cpus_allowed = cpumask_of_cpu(cpu);
@@ -1005,10 +1004,10 @@ index 8e7a7ce..af9eace 100644
        k->flags |= PF_THREAD_BOUND;
  }
  EXPORT_SYMBOL(kthread_bind);
-diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
-index 4e5288a..d1eef76 100644
---- a/kernel/posix-cpu-timers.c
-+++ b/kernel/posix-cpu-timers.c
+Index: kernel-2.6.28/kernel/posix-cpu-timers.c
+===================================================================
+--- kernel-2.6.28.orig/kernel/posix-cpu-timers.c
++++ kernel-2.6.28/kernel/posix-cpu-timers.c
 @@ -10,76 +10,6 @@
  #include <linux/kernel_stat.h>
  
@@ -1086,14 +1085,7 @@ index 4e5288a..d1eef76 100644
   * Called after updating RLIMIT_CPU to set timer expiration if necessary.
   */
  void update_rlimit_cpu(unsigned long rlim_new)
-@@ -294,12 +224,77 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
-               cpu->cpu = virt_ticks(p);
-               break;
-       case CPUCLOCK_SCHED:
--              cpu->sched = task_sched_runtime(p);
-+              cpu->sched = task_sched_runtime(p);
-               break;
-       }
+@@ -300,6 +230,71 @@ static int cpu_clock_sample(const clocki
        return 0;
  }
  
@@ -1165,7 +1157,7 @@ index 4e5288a..d1eef76 100644
  /*
   * Sample a process (thread group) clock for the given group_leader task.
   * Must be called with tasklist_lock held for reading.
-@@ -520,16 +515,17 @@ static void cleanup_timers(struct list_head *head,
+@@ -521,16 +516,17 @@ static void cleanup_timers(struct list_h
  void posix_cpu_timers_exit(struct task_struct *tsk)
  {
        cleanup_timers(tsk->cpu_timers,
@@ -1187,7 +1179,7 @@ index 4e5288a..d1eef76 100644
  }
  
  static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
-@@ -686,6 +682,33 @@ static void cpu_timer_fire(struct k_itimer *timer)
+@@ -687,6 +683,33 @@ static void cpu_timer_fire(struct k_itim
  }
  
  /*
@@ -1221,7 +1213,7 @@ index 4e5288a..d1eef76 100644
   * Guts of sys_timer_settime for CPU timers.
   * This is called with the timer locked and interrupts disabled.
   * If we return TIMER_RETRY, it's necessary to release the timer's lock
-@@ -746,7 +769,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
+@@ -747,7 +770,7 @@ int posix_cpu_timer_set(struct k_itimer
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
@@ -1230,7 +1222,7 @@ index 4e5288a..d1eef76 100644
        }
  
        if (old) {
-@@ -894,7 +917,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
+@@ -895,7 +918,7 @@ void posix_cpu_timer_get(struct k_itimer
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
@@ -1239,7 +1231,7 @@ index 4e5288a..d1eef76 100644
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
-@@ -956,6 +979,7 @@ static void check_thread_timers(struct task_struct *tsk,
+@@ -957,6 +980,7 @@ static void check_thread_timers(struct t
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;
@@ -1247,7 +1239,7 @@ index 4e5288a..d1eef76 100644
  
        maxfire = 20;
        tsk->cputime_expires.prof_exp = cputime_zero;
-@@ -993,7 +1017,7 @@ static void check_thread_timers(struct task_struct *tsk,
+@@ -994,7 +1018,7 @@ static void check_thread_timers(struct t
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
@@ -1256,7 +1248,7 @@ index 4e5288a..d1eef76 100644
                        tsk->cputime_expires.sched_exp = t->expires.sched;
                        break;
                }
-@@ -1004,12 +1028,13 @@ static void check_thread_timers(struct task_struct *tsk,
+@@ -1005,12 +1029,13 @@ static void check_thread_timers(struct t
        /*
         * Check for the special case thread timers.
         */
@@ -1274,7 +1266,7 @@ index 4e5288a..d1eef76 100644
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
-@@ -1017,14 +1042,13 @@ static void check_thread_timers(struct task_struct *tsk,
+@@ -1018,14 +1043,13 @@ static void check_thread_timers(struct t
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
@@ -1293,7 +1285,7 @@ index 4e5288a..d1eef76 100644
                        }
                        printk(KERN_INFO
                                "RT Watchdog Timeout: %s[%d]\n",
-@@ -1034,6 +1058,19 @@ static void check_thread_timers(struct task_struct *tsk,
+@@ -1035,6 +1059,19 @@ static void check_thread_timers(struct t
        }
  }
  
@@ -1313,7 +1305,7 @@ index 4e5288a..d1eef76 100644
  /*
   * Check for any per-thread CPU timers that have fired and move them
   * off the tsk->*_timers list onto the firing list.  Per-thread timers
-@@ -1057,13 +1094,15 @@ static void check_process_timers(struct task_struct *tsk,
+@@ -1058,13 +1095,15 @@ static void check_process_timers(struct
            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
            list_empty(&timers[CPUCLOCK_VIRT]) &&
            cputime_eq(sig->it_virt_expires, cputime_zero) &&
@@ -1331,7 +1323,7 @@ index 4e5288a..d1eef76 100644
        utime = cputime.utime;
        ptime = cputime_add(utime, cputime.stime);
        sum_sched_runtime = cputime.sum_exec_runtime;
-@@ -1234,7 +1273,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
+@@ -1235,7 +1274,7 @@ void posix_cpu_timer_schedule(struct k_i
                        clear_dead_task(timer, now);
                        goto out_unlock;
                }
@@ -1340,7 +1332,7 @@ index 4e5288a..d1eef76 100644
                bump_cpu_timer(timer, now);
                /* Leave the tasklist_lock locked for the call below.  */
        }
-@@ -1318,7 +1357,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
+@@ -1319,7 +1358,7 @@ static inline int fastpath_timer_check(s
                struct task_cputime task_sample = {
                        .utime = tsk->utime,
                        .stime = tsk->stime,
@@ -1349,7 +1341,7 @@ index 4e5288a..d1eef76 100644
                };
  
                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
-@@ -1329,7 +1368,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
+@@ -1330,7 +1369,7 @@ static inline int fastpath_timer_check(s
        if (!task_cputime_zero(&sig->cputime_expires)) {
                struct task_cputime group_sample;
  
@@ -1358,7 +1350,7 @@ index 4e5288a..d1eef76 100644
                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }
-@@ -1411,7 +1450,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
+@@ -1412,7 +1451,7 @@ void set_process_cpu_timer(struct task_s
        struct list_head *head;
  
        BUG_ON(clock_idx == CPUCLOCK_SCHED);
@@ -1367,10 +1359,10 @@ index 4e5288a..d1eef76 100644
  
        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
-diff --git a/kernel/sched.c b/kernel/sched.c
-index e4bb1dd..2869e03 100644
---- a/kernel/sched.c
-+++ b/kernel/sched.c
+Index: kernel-2.6.28/kernel/sched.c
+===================================================================
+--- kernel-2.6.28.orig/kernel/sched.c
++++ kernel-2.6.28/kernel/sched.c
 @@ -1,3 +1,6 @@
 +#ifdef CONFIG_SCHED_BFS
 +#include "sched_bfs.c"
@@ -1378,7 +1370,7 @@ index e4bb1dd..2869e03 100644
  /*
   *  kernel/sched.c
   *
-@@ -4203,7 +4206,6 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
+@@ -4252,7 +4255,6 @@ void account_steal_time(struct task_stru
  
        if (p == rq->idle) {
                p->stime = cputime_add(p->stime, steal);
@@ -1386,7 +1378,7 @@ index e4bb1dd..2869e03 100644
                if (atomic_read(&rq->nr_iowait) > 0)
                        cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
                else
-@@ -4339,7 +4341,7 @@ void __kprobes sub_preempt_count(int val)
+@@ -4388,7 +4390,7 @@ void __kprobes sub_preempt_count(int val
        /*
         * Underflow?
         */
@@ -1395,16 +1387,15 @@ index e4bb1dd..2869e03 100644
                return;
        /*
         * Is the spinlock portion underflowing?
-@@ -9388,3 +9390,4 @@ struct cgroup_subsys cpuacct_subsys = {
+@@ -9437,3 +9439,4 @@ struct cgroup_subsys cpuacct_subsys = {
        .subsys_id = cpuacct_subsys_id,
  };
  #endif        /* CONFIG_CGROUP_CPUACCT */
 +#endif /* CONFIG_SCHED_BFS */
-diff --git a/kernel/sched_bfs.c b/kernel/sched_bfs.c
-new file mode 100644
-index 0000000..7cc1752
+Index: kernel-2.6.28/kernel/sched_bfs.c
+===================================================================
 --- /dev/null
-+++ b/kernel/sched_bfs.c
++++ kernel-2.6.28/kernel/sched_bfs.c
 @@ -0,0 +1,6110 @@
 +/*
 + *  kernel/sched_bfs.c, was sched.c
@@ -7516,11 +7507,11 @@ index 0000000..7cc1752
 +void proc_sched_set_task(struct task_struct *p)
 +{}
 +#endif
-diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
-index 7dbf72a..90fba60 100644
---- a/kernel/sched_stats.h
-+++ b/kernel/sched_stats.h
-@@ -296,20 +296,21 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
+Index: kernel-2.6.28/kernel/sched_stats.h
+===================================================================
+--- kernel-2.6.28.orig/kernel/sched_stats.h
++++ kernel-2.6.28/kernel/sched_stats.h
+@@ -296,20 +296,21 @@ sched_info_switch(struct task_struct *pr
  static inline void account_group_user_time(struct task_struct *tsk,
                                           cputime_t cputime)
  {
@@ -7550,7 +7541,7 @@ index 7dbf72a..90fba60 100644
  }
  
  /**
-@@ -325,20 +326,21 @@ static inline void account_group_user_time(struct task_struct *tsk,
+@@ -325,20 +326,21 @@ static inline void account_group_user_ti
  static inline void account_group_system_time(struct task_struct *tsk,
                                             cputime_t cputime)
  {
@@ -7565,14 +7556,14 @@ index 7dbf72a..90fba60 100644
 -      if (sig->cputime.totals) {
 -              struct task_cputime *times;
 +      cputimer = &tsk->signal->cputimer;
++
++      if (!cputimer->running)
++              return;
  
 -              times = per_cpu_ptr(sig->cputime.totals, get_cpu());
 -              times->stime = cputime_add(times->stime, cputime);
 -              put_cpu_no_resched();
 -      }
-+      if (!cputimer->running)
-+              return;
-+
 +      spin_lock(&cputimer->lock);
 +      cputimer->cputime.stime =
 +              cputime_add(cputimer->cputime.stime, cputime);
@@ -7580,7 +7571,7 @@ index 7dbf72a..90fba60 100644
  }
  
  /**
-@@ -354,6 +356,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
+@@ -354,6 +356,7 @@ static inline void account_group_system_
  static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
  {
@@ -7588,30 +7579,30 @@ index 7dbf72a..90fba60 100644
        struct signal_struct *sig;
  
        sig = tsk->signal;
-@@ -362,11 +365,12 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
+@@ -362,11 +365,12 @@ static inline void account_group_exec_ru
        if (unlikely(!sig))
                return;
  
 -      if (sig->cputime.totals) {
 -              struct task_cputime *times;
 +      cputimer = &sig->cputimer;
++
++      if (!cputimer->running)
++              return;
  
 -              times = per_cpu_ptr(sig->cputime.totals, get_cpu());
 -              times->sum_exec_runtime += ns;
 -              put_cpu_no_resched();
 -      }
-+      if (!cputimer->running)
-+              return;
-+
 +      spin_lock(&cputimer->lock);
 +      cputimer->cputime.sum_exec_runtime += ns;
 +      spin_unlock(&cputimer->lock);
  }
-diff --git a/kernel/signal.c b/kernel/signal.c
-index 4530fc6..85abaea 100644
---- a/kernel/signal.c
-+++ b/kernel/signal.c
-@@ -1342,7 +1342,6 @@ int do_notify_parent(struct task_struct *tsk, int sig)
+Index: kernel-2.6.28/kernel/signal.c
+===================================================================
+--- kernel-2.6.28.orig/kernel/signal.c
++++ kernel-2.6.28/kernel/signal.c
+@@ -1342,7 +1342,6 @@ int do_notify_parent(struct task_struct
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;
@@ -7619,7 +7610,7 @@ index 4530fc6..85abaea 100644
        int ret = sig;
  
        BUG_ON(sig == -1);
-@@ -1373,9 +1372,10 @@ int do_notify_parent(struct task_struct *tsk, int sig)
+@@ -1373,9 +1372,10 @@ int do_notify_parent(struct task_struct
  
        info.si_uid = tsk->uid;
  
@@ -7633,11 +7624,11 @@ index 4530fc6..85abaea 100644
  
        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
-diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 3d56fe7..1fe0a2d 100644
---- a/kernel/sysctl.c
-+++ b/kernel/sysctl.c
-@@ -86,11 +86,6 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max;
+Index: kernel-2.6.28/kernel/sysctl.c
+===================================================================
+--- kernel-2.6.28.orig/kernel/sysctl.c
++++ kernel-2.6.28/kernel/sysctl.c
+@@ -86,11 +86,6 @@ extern int sysctl_nr_open_min, sysctl_nr
  extern int rcutorture_runnable;
  #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
  
@@ -7674,7 +7665,7 @@ index 3d56fe7..1fe0a2d 100644
  static int min_sched_granularity_ns = 100000;         /* 100 usecs */
  static int max_sched_granularity_ns = NSEC_PER_SEC;   /* 1 second */
  static int min_wakeup_granularity_ns;                 /* 0 usecs */
-@@ -235,6 +236,7 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC;       /* 1 second */
+@@ -235,6 +236,7 @@ static int max_wakeup_granularity_ns = N
  #endif
  
  static struct ctl_table kern_table[] = {
@@ -7721,10 +7712,10 @@ index 3d56fe7..1fe0a2d 100644
  #if defined(CONFIG_S390) && defined(CONFIG_SMP)
        {
                .ctl_name       = KERN_SPIN_RETRY,
-diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
-index ef1586d..5677d7f 100644
---- a/kernel/time/tick-sched.c
-+++ b/kernel/time/tick-sched.c
+Index: kernel-2.6.28/kernel/time/tick-sched.c
+===================================================================
+--- kernel-2.6.28.orig/kernel/time/tick-sched.c
++++ kernel-2.6.28/kernel/time/tick-sched.c
 @@ -447,6 +447,7 @@ void tick_nohz_restart_sched_tick(void)
        tick_do_update_jiffies64(now);
        cpu_clear(cpu, nohz_cpu_mask);
@@ -7745,11 +7736,11 @@ index ef1586d..5677d7f 100644
        }
  
        touch_softlockup_watchdog();
-diff --git a/kernel/timer.c b/kernel/timer.c
-index 15e4f90..f62d67b 100644
---- a/kernel/timer.c
-+++ b/kernel/timer.c
-@@ -1021,20 +1021,21 @@ unsigned long get_next_timer_interrupt(unsigned long now)
+Index: kernel-2.6.28/kernel/timer.c
+===================================================================
+--- kernel-2.6.28.orig/kernel/timer.c
++++ kernel-2.6.28/kernel/timer.c
+@@ -1021,20 +1021,21 @@ unsigned long get_next_timer_interrupt(u
  }
  #endif
  
@@ -7786,7 +7777,7 @@ index 15e4f90..f62d67b 100644
        account_process_tick(p, user_tick);
        run_local_timers();
        if (rcu_pending(cpu))
-@@ -1098,8 +1099,7 @@ static inline void calc_load(unsigned long ticks)
+@@ -1098,8 +1099,7 @@ static inline void calc_load(unsigned lo
  
  /*
   * This function runs timers and the timer-tq in bottom half context.
@@ -7796,10 +7787,10 @@ index 15e4f90..f62d67b 100644
  {
        struct tvec_base *base = __get_cpu_var(tvec_bases);
  
-diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index d4dc69d..9041f86 100644
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
+Index: kernel-2.6.28/kernel/workqueue.c
+===================================================================
+--- kernel-2.6.28.orig/kernel/workqueue.c
++++ kernel-2.6.28/kernel/workqueue.c
 @@ -323,7 +323,6 @@ static int worker_thread(void *__cwq)
        if (cwq->wq->freezeable)
                set_freezable();
@@ -7808,11 +7799,11 @@ index d4dc69d..9041f86 100644
  
        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
-diff --git a/mm/oom_kill.c b/mm/oom_kill.c
-index a0a0190..4d35180 100644
---- a/mm/oom_kill.c
-+++ b/mm/oom_kill.c
-@@ -334,7 +334,7 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
+Index: kernel-2.6.28/mm/oom_kill.c
+===================================================================
+--- kernel-2.6.28.orig/mm/oom_kill.c
++++ kernel-2.6.28/mm/oom_kill.c
+@@ -334,7 +334,7 @@ static void __oom_kill_task(struct task_
         * all the memory it needs. That way it should be able to
         * exit() and clear out its resources quickly...
         */
index ba34304..a8fde40 100644 (file)
@@ -31,6 +31,8 @@ overclock.diff
 #usbhost4.diff
 #usbhostmode.diff
 bfs.patch
+bfs-316-to-318.patch
+bfs-318-to-330.patch
 voltage_scaling_1.diff
 voltage_scaling_0.diff
 armthumb.diff
index a16b238..4bfeb32 100644 (file)
@@ -22,21 +22,34 @@ Index: kernel-2.6.28/arch/arm/mach-omap2/pm34xx.c
 ===================================================================
 --- kernel-2.6.28.orig/arch/arm/mach-omap2/pm34xx.c
 +++ kernel-2.6.28/arch/arm/mach-omap2/pm34xx.c
-@@ -1330,6 +1330,10 @@ static int voltagescale_vpforceupdate(u3
+@@ -1323,20 +1323,28 @@ static int voltagescale_vpforceupdate(u3
+       current_opp_no = get_opp_no(current_opp);
+       t2_smps_steps = abs(target_vsel - current_vsel);
+-      if (vdd == VDD1_OPP) {
++      if (vdd == PRCM_VDD1) {
+               vp_config_offs = OMAP3_PRM_VP1_CONFIG_OFFSET;
+               vp_tranxdone_st = OMAP3430_VP1_TRANXDONE_ST;
+               vpconfig = target_vsel << OMAP3430_INITVOLTAGE_SHIFT |
                                ((target_opp_no < VDD1_OPP3)
-                               ? PRM_VP1_CONFIG_ERRORGAIN_OPPLOW
-                               : PRM_VP1_CONFIG_ERRORGAIN_OPPHIGH);
+-                              ? PRM_VP1_CONFIG_ERRORGAIN_OPPLOW
+-                              : PRM_VP1_CONFIG_ERRORGAIN_OPPHIGH);
+-      } else if (vdd == VDD2_OPP) {
++                              ? PRM_VP1_CONFIG_ERRORGAIN_LOWOPP
++                              : PRM_VP1_CONFIG_ERRORGAIN_HIGHOPP);
 +              prm_rmw_mod_reg_bits(OMAP3430_VC_CMD_ON_MASK,
 +                              (target_vsel << OMAP3430_VC_CMD_ON_SHIFT),
 +                              OMAP3430_GR_MOD,
 +                              OMAP3_PRM_VC_CMD_VAL_0_OFFSET);
-       } else if (vdd == VDD2_OPP) {
++      } else if (vdd == PRCM_VDD2) {
                vp_config_offs = OMAP3_PRM_VP2_CONFIG_OFFSET;
                vp_tranxdone_st = OMAP3430_VP2_TRANXDONE_ST;
-@@ -1337,6 +1341,10 @@ static int voltagescale_vpforceupdate(u3
+               vpconfig = target_vsel << OMAP3430_INITVOLTAGE_SHIFT |
                                ((target_opp_no < VDD2_OPP3)
-                               ? PRM_VP2_CONFIG_ERRORGAIN_OPPLOW
-                               : PRM_VP2_CONFIG_ERRORGAIN_OPPHIGH);
+-                              ? PRM_VP2_CONFIG_ERRORGAIN_OPPLOW
+-                              : PRM_VP2_CONFIG_ERRORGAIN_OPPHIGH);
++                              ? PRM_VP2_CONFIG_ERRORGAIN_LOWOPP
++                              : PRM_VP2_CONFIG_ERRORGAIN_HIGHOPP);
 +              prm_rmw_mod_reg_bits(OMAP3430_VC_CMD_ON_MASK,
 +                              (target_vsel << OMAP3430_VC_CMD_ON_SHIFT),
 +                              OMAP3430_GR_MOD,