From 030610875444bb2fb75b47f3fd6279cb4aafc335 Mon Sep 17 00:00:00 2001
From: Peter Hunt
Date: Mon, 29 Aug 2011 12:47:02 +0000
Subject: [PATCH] Minor changes to sequential BFS patches in line with prereqs

---
 .../debian/patches/bfs/bfs-318-to-330.patch |   17 ++-----
 .../debian/patches/bfs/bfs-330-to-350.patch |   23 ++-------
 .../debian/patches/bfs/bfs-357-to-360.patch |   50 ++++++++++----------
 3 files changed, 34 insertions(+), 56 deletions(-)

diff --git a/kernel-bfs-2.6.28/debian/patches/bfs/bfs-318-to-330.patch b/kernel-bfs-2.6.28/debian/patches/bfs/bfs-318-to-330.patch
index 7c9a557..b42ee67 100644
--- a/kernel-bfs-2.6.28/debian/patches/bfs/bfs-318-to-330.patch
+++ b/kernel-bfs-2.6.28/debian/patches/bfs/bfs-318-to-330.patch
@@ -101,12 +101,12 @@ Index: kernel-2.6.28/kernel/sched_bfs.c
 +
 +static inline int deadline_before(u64 deadline, u64 time)
 +{
-+	return (deadline < time);
++	return (s64)(time - deadline) > 0;
 +}
 +
 +static inline int deadline_after(u64 deadline, u64 time)
 +{
-+	return (deadline > time);
++	return (s64)(deadline - time) > 0;
 +}
 +
 +/*
@@ -376,15 +376,6 @@ Index: kernel-2.6.28/kernel/sched_bfs.c
 	}
 
 	if (likely(queued_notrunning())) {
-@@ -2461,7 +2530,7 @@ need_resched_nonpreemptible:
- 		goto need_resched_nonpreemptible;
- 	preempt_enable_no_resched();
- 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
--		goto need_resched;
-+		goto need_resched;
- }
- EXPORT_SYMBOL(schedule);
- 
 @@ -2829,7 +2898,7 @@ void rt_mutex_setprio(struct task_struct
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
@@ -405,8 +396,8 @@ Index: kernel-2.6.28/kernel/sched_bfs.c
 	prio += delta;
 	if (idleprio_task(p))
 @@ -3126,7 +3196,7 @@ recheck:
- 	if (policy == SCHED_NORMAL)
- 		break;
+ 	if (policy == SCHED_BATCH)
+ 		goto out;
 	if (policy != SCHED_IDLEPRIO)
 -		return -EPERM;
 +		return -EPERM;
diff --git a/kernel-bfs-2.6.28/debian/patches/bfs/bfs-330-to-350.patch b/kernel-bfs-2.6.28/debian/patches/bfs/bfs-330-to-350.patch
index ad90774..ff4b569 100644
--- a/kernel-bfs-2.6.28/debian/patches/bfs/bfs-330-to-350.patch
+++ b/kernel-bfs-2.6.28/debian/patches/bfs/bfs-330-to-350.patch
@@ -933,24 +933,21 @@ Index: kernel-2.6.28/kernel/sched_bfs.c
 	set_rq_task(rq, next);
 	grq.nr_switches++;
 	prev->oncpu = 0;
-@@ -2547,10 +2652,15 @@ need_resched_nonpreemptible:
+@@ -2547,8 +2652,13 @@ need_resched_nonpreemptible:
 	} else
 		grq_unlock_irq();
 
 -	if (unlikely(reacquire_kernel_lock(current) < 0))
 +rerun_prev_unlocked:
 +	if (unlikely(reacquire_kernel_lock(current) < 0)) {
-+//		prev = rq->curr;
-+//		switch_count = &prev->nivcsw;
++		prev = rq->curr;
++		switch_count = &prev->nivcsw;
 		goto need_resched_nonpreemptible;
 +	}
 +
 	preempt_enable_no_resched();
--	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
-+	if (need_resched())
+ 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
 		goto need_resched;
- }
- EXPORT_SYMBOL(schedule);
 @@ -3066,8 +3176,9 @@ int task_prio(const struct task_struct *
 	if (prio <= 0)
 		goto out;
@@ -1017,17 +1014,6 @@ Index: kernel-2.6.28/kernel/sched_bfs.c
 	rq->sd = NULL;
 	rq->rd = NULL;
 	rq->online = 0;
-@@ -6219,10 +6334,6 @@ cputime_t task_stime(struct task_struct
- 	}
- #endif
- 
--void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
--{
--}
--
- inline cputime_t task_gtime(struct task_struct *p)
- {
- 	return p->gtime;
 Index: kernel-2.6.28/kernel/sysctl.c
 ===================================================================
 --- kernel-2.6.28.orig/kernel/sysctl.c
@@ -1050,3 +1036,4 @@ Index: kernel-2.6.28/kernel/sysctl.c
 	},
 	{
 		.ctl_name = CTL_UNNUMBERED,
+
diff --git
a/kernel-bfs-2.6.28/debian/patches/bfs/bfs-357-to-360.patch b/kernel-bfs-2.6.28/debian/patches/bfs/bfs-357-to-360.patch index 09b037f..79005f9 100644 --- a/kernel-bfs-2.6.28/debian/patches/bfs/bfs-357-to-360.patch +++ b/kernel-bfs-2.6.28/debian/patches/bfs/bfs-357-to-360.patch @@ -26,11 +26,11 @@ frequency scaling. kernel/sched_bfs.c | 89 ++++++++++++++++++++++-------------------------- 3 files changed, 43 insertions(+), 50 deletions(-) -Index: linux-2.6.28/include/linux/jiffies.h +Index: linux-2.6.36.2-bfs/include/linux/jiffies.h =================================================================== ---- linux-2.6.28.orig/include/linux/jiffies.h 2010-12-14 22:13:10.975304692 +1100 -+++ linux-2.6.28/include/linux/jiffies.h 2010-12-14 22:14:03.530569735 +1100 -@@ -154,7 +154,7 @@ static inline u64 get_jiffies_64(void) +--- linux-2.6.36.2-bfs.orig/include/linux/jiffies.h 2010-12-14 22:13:10.975304692 +1100 ++++ linux-2.6.36.2-bfs/include/linux/jiffies.h 2010-12-14 22:14:03.530569735 +1100 +@@ -164,7 +164,7 @@ static inline u64 get_jiffies_64(void) * Have the 32 bit jiffies value wrap 5 minutes after boot * so jiffies wrap bugs show up earlier. */ @@ -39,11 +39,11 @@ Index: linux-2.6.28/include/linux/jiffies.h /* * Change timeval to jiffies, trying to avoid the -Index: linux-2.6.28/include/linux/sched.h +Index: linux-2.6.36.2-bfs/include/linux/sched.h =================================================================== ---- linux-2.6.28.orig/include/linux/sched.h 2010-12-14 22:13:10.965304640 +1100 -+++ linux-2.6.28/include/linux/sched.h 2010-12-14 22:14:03.524569704 +1100 -@@ -1426,7 +1426,7 @@ static inline void tsk_cpus_current(stru +--- linux-2.6.36.2-bfs.orig/include/linux/sched.h 2010-12-14 22:13:10.965304640 +1100 ++++ linux-2.6.36.2-bfs/include/linux/sched.h 2010-12-14 22:14:03.524569704 +1100 +@@ -1532,7 +1532,7 @@ static inline void tsk_cpus_current(stru static inline void print_scheduler_version(void) { @@ -52,19 +52,19 @@ Index: linux-2.6.28/include/linux/sched.h } static inline int iso_task(struct task_struct *p) -Index: linux-2.6.28/kernel/sched_bfs.c +Index: linux-2.6.36.2-bfs/kernel/sched_bfs.c =================================================================== ---- linux-2.6.28.orig/kernel/sched_bfs.c 2010-12-14 22:13:10.983304734 +1100 -+++ linux-2.6.28/kernel/sched_bfs.c 2010-12-14 22:14:54.061814177 +1100 -@@ -204,7 +204,6 @@ struct rq { - #ifdef CONFIG_NO_HZ +--- linux-2.6.36.2-bfs.orig/kernel/sched_bfs.c 2010-12-14 22:13:10.983304734 +1100 ++++ linux-2.6.36.2-bfs/kernel/sched_bfs.c 2010-12-14 22:14:54.061814177 +1100 +@@ -190,7 +190,6 @@ struct rq { + u64 nohz_stamp; unsigned char in_nohz_recently; #endif - struct task_struct *last_task; #endif struct task_struct *curr, *idle; -@@ -733,19 +732,12 @@ static int suitable_idle_cpus(struct tas +@@ -742,19 +741,12 @@ static int suitable_idle_cpus(struct tas static void resched_task(struct task_struct *p); @@ -90,7 +90,7 @@ Index: linux-2.6.28/kernel/sched_bfs.c /* * The best idle CPU is chosen according to the CPUIDLE ranking above where the -@@ -798,27 +790,28 @@ static void resched_best_idle(struct tas +@@ -807,27 +799,28 @@ static void resched_best_idle(struct tas } tmp_rq = cpu_rq(cpu_tmp); @@ -128,7 +128,7 @@ Index: linux-2.6.28/kernel/sched_bfs.c break; best_ranking = ranking; } -@@ -835,11 +828,11 @@ static inline void resched_suitable_idle +@@ -844,11 +837,11 @@ static inline void resched_suitable_idle /* * The cpu cache locality difference between CPUs is used to determine how far @@ -142,7 +142,7 @@ Index: 
linux-2.6.28/kernel/sched_bfs.c * deadlines before being taken onto another cpu, allowing for 2* the double * seen by separate CPUs above. * Simple summary: Virtual deadlines are equal on shared cache CPUs, double -@@ -848,12 +841,11 @@ static inline void resched_suitable_idle +@@ -857,12 +850,11 @@ static inline void resched_suitable_idle static inline int cache_distance(struct rq *task_rq, struct rq *rq, struct task_struct *p) { @@ -160,7 +160,7 @@ Index: linux-2.6.28/kernel/sched_bfs.c } #else /* CONFIG_SMP */ static inline void inc_qnr(void) -@@ -892,10 +884,6 @@ cache_distance(struct rq *task_rq, struc +@@ -900,10 +892,6 @@ cache_distance(struct rq *task_rq, struc { return 0; } @@ -171,7 +171,7 @@ Index: linux-2.6.28/kernel/sched_bfs.c #endif /* CONFIG_SMP */ /* -@@ -1287,10 +1275,10 @@ static void try_preempt(struct task_stru +@@ -1293,10 +1281,10 @@ static void try_preempt(struct task_stru return; } @@ -184,7 +184,7 @@ Index: linux-2.6.28/kernel/sched_bfs.c latest_deadline = 0; highest_prio = -1; -@@ -2597,7 +2585,7 @@ need_resched_nonpreemptible: +@@ -2745,7 +2738,7 @@ need_resched_nonpreemptible: prev->last_ran = rq->clock; /* Task changed affinity off this CPU */ @@ -193,17 +193,17 @@ Index: linux-2.6.28/kernel/sched_bfs.c resched_suitable_idle(prev); else if (!deactivate) { if (!queued_notrunning()) { -@@ -2639,8 +2627,6 @@ need_resched_nonpreemptible: - if (likely(prev != next)) { +@@ -2788,8 +2781,6 @@ need_resched_nonpreemptible: sched_info_switch(prev, next); + perf_event_task_sched_out(prev, next); - if (prev != idle) - set_last_task(rq, prev); set_rq_task(rq, next); grq.nr_switches++; prev->oncpu = 0; -@@ -6054,10 +6040,12 @@ void __init sched_init_smp(void) - cpu_set(other_cpu, rq->cache_siblings); +@@ -6541,10 +6532,12 @@ void __init sched_init_smp(void) + cpumask_set_cpu(other_cpu, &rq->cache_siblings); } #endif - if (sd->level <= SD_LV_MC) @@ -218,7 +218,7 @@ Index: linux-2.6.28/kernel/sched_bfs.c else continue; -@@ -6160,7 +6148,7 @@ void __init sched_init(void) +@@ -6650,7 +6643,7 @@ void __init sched_init(void) if (i == j) rq->cpu_locality[j] = 0; else -- 1.7.9.5
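
Aside on the first hunk above: the bfs-318-to-330 change replaces the plain "deadline < time" comparison with a signed-subtraction test, the same wraparound-safe idiom the kernel's time_after()/time_before() macros use for jiffies. The stand-alone C sketch below is not taken from the patch; it uses ordinary stdint types in place of the kernel's u64/s64 purely to show why the two forms differ once the counter wraps.

/*
 * Illustrative sketch only: mirrors the wraparound-safe comparison the hunk
 * introduces (cf. the kernel's time_after()/time_before() macros), using
 * plain stdint types instead of the kernel's u64/s64.
 */
#include <stdint.h>
#include <stdio.h>

static inline int deadline_before(uint64_t deadline, uint64_t time)
{
	return (int64_t)(time - deadline) > 0;	/* true if deadline has already passed */
}

static inline int deadline_after(uint64_t deadline, uint64_t time)
{
	return (int64_t)(deadline - time) > 0;	/* true if deadline is still ahead */
}

int main(void)
{
	uint64_t deadline = UINT64_MAX - 5;	/* just before the counter wraps */
	uint64_t now = 10;			/* shortly after the wrap */

	/* A plain "<" gets this wrong; the signed difference stays correct. */
	printf("deadline < now    : %d\n", deadline < now);			/* prints 0 */
	printf("deadline_before() : %d\n", deadline_before(deadline, now));	/* prints 1 */
	return 0;
}

Read deadline_before(d, t) as "deadline d falls before time t", matching the argument order used in the hunk; the trick only assumes the two counters are never more than half the counter range apart.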