1 diff -Nurp kernel-2.6.28-20101501+0m5/arch/arm/include/asm/cacheflush.h kernel-2.6.28-20103103+0m5/arch/arm/include/asm/cacheflush.h
2 --- kernel-2.6.28-20101501+0m5/arch/arm/include/asm/cacheflush.h 2008-12-25 00:26:37.000000000 +0100
3 +++ kernel-2.6.28-20103103+0m5/arch/arm/include/asm/cacheflush.h 2012-12-16 13:35:56.024308851 +0100
5 * Please note that the implementation of these, and the required
6 * effects are cache-type (VIVT/VIPT/PIPT) specific.
8 - * flush_cache_kern_all()
11 * Unconditionally clean and invalidate the entire cache.
13 - * flush_cache_user_mm(mm)
16 * Clean and invalidate all user space cache entries
17 * before a change of page tables.
19 - * flush_cache_user_range(start, end, flags)
20 + * flush_user_range(start, end, flags)
22 * Clean and invalidate a range of cache entries in the
23 * specified address space before a change of page tables.
25 * - start - virtual start address
26 * - end - virtual end address
28 + * coherent_user_range(start, end)
30 + * Ensure coherency between the Icache and the Dcache in the
31 + * region described by start, end. If you have non-snooping
32 + * Harvard caches, you need to implement this function.
33 + * - start - virtual start address
34 + * - end - virtual end address
36 + * flush_kern_dcache_area(kaddr, size)
38 + * Ensure that the data held in page is written back.
39 + * - kaddr - page address
40 + * - size - region size
45 @@ -375,7 +389,7 @@ extern void flush_ptrace_access(struct v
46 * Harvard caches are synchronised for the user space address range.
47 * This is used for the ARM private sys_cacheflush system call.
49 -#define flush_cache_user_range(vma,start,end) \
50 +#define flush_cache_user_range(start,end) \
51 __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
54 diff -Nurp kernel-2.6.28-20101501+0m5/arch/arm/kernel/traps.c kernel-2.6.28-20103103+0m5/arch/arm/kernel/traps.c
55 --- kernel-2.6.28-20101501+0m5/arch/arm/kernel/traps.c 2012-12-16 13:28:45.468315523 +0100
56 +++ kernel-2.6.28-20103103+0m5/arch/arm/kernel/traps.c 2012-12-16 13:35:56.024308851 +0100
57 @@ -418,7 +418,9 @@ do_cache_op(unsigned long start, unsigne
58 if (end > vma->vm_end)
61 - flush_cache_user_range(vma, start, end);
62 + up_read(&mm->mmap_sem);
63 + flush_cache_user_range(start, end);
66 up_read(&mm->mmap_sem);
68 diff -Nurp kernel-2.6.28-20101501+0m5/arch/arm/mach-omap2/smartreflex.c kernel-2.6.28-20103103+0m5/arch/arm/mach-omap2/smartreflex.c
69 --- kernel-2.6.28-20101501+0m5/arch/arm/mach-omap2/smartreflex.c 2012-12-16 13:30:17.084314106 +0100
70 +++ kernel-2.6.28-20103103+0m5/arch/arm/mach-omap2/smartreflex.c 2012-12-16 13:35:56.024308851 +0100
71 @@ -890,7 +890,7 @@ int sr_voltagescale_vcbypass(u32 target_
75 - if (sr->is_autocomp_active) {
76 + if (sr->is_autocomp_active && !sr->is_sr_reset) {
77 WARN(1, "SR: Must not transmit VCBYPASS command while SR is "
80 diff -Nurp kernel-2.6.28-20101501+0m5/arch/arm/mm/fault.c kernel-2.6.28-20103103+0m5/arch/arm/mm/fault.c
81 --- kernel-2.6.28-20101501+0m5/arch/arm/mm/fault.c 2012-12-16 13:28:45.472315523 +0100
82 +++ kernel-2.6.28-20103103+0m5/arch/arm/mm/fault.c 2012-12-16 13:35:56.024308851 +0100
83 @@ -387,6 +387,9 @@ do_translation_fault(unsigned long addr,
85 return do_page_fault(addr, fsr, regs);
87 + if (user_mode(regs))
90 index = pgd_index(addr);
93 @@ -449,7 +452,12 @@ static struct fsr_info {
94 { do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
95 { do_bad, SIGKILL, 0, "terminal exception" },
96 { do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
97 +/* Do we need runtime check ? */
98 +#if __LINUX_ARM_ARCH__ < 6
99 { do_bad, SIGBUS, 0, "external abort on linefetch" },
101 + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "I-cache maintenance fault" },
103 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
104 { do_bad, SIGBUS, 0, "external abort on linefetch" },
105 { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
106 diff -Nurp kernel-2.6.28-20101501+0m5/arch/arm/mm/mmu.c kernel-2.6.28-20103103+0m5/arch/arm/mm/mmu.c
107 --- kernel-2.6.28-20101501+0m5/arch/arm/mm/mmu.c 2012-12-16 13:28:30.840315752 +0100
108 +++ kernel-2.6.28-20103103+0m5/arch/arm/mm/mmu.c 2012-12-16 13:35:56.028308851 +0100
109 @@ -953,4 +953,6 @@ void setup_mm_for_reboot(char mode)
110 pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
111 flush_pmd_entry(pmd);
114 + local_flush_tlb_all();
116 diff -Nurp kernel-2.6.28-20101501+0m5/arch/arm/mm/proc-v6.S kernel-2.6.28-20103103+0m5/arch/arm/mm/proc-v6.S
117 --- kernel-2.6.28-20101501+0m5/arch/arm/mm/proc-v6.S 2008-12-25 00:26:37.000000000 +0100
118 +++ kernel-2.6.28-20103103+0m5/arch/arm/mm/proc-v6.S 2012-12-16 13:35:56.028308851 +0100
119 @@ -56,8 +56,6 @@ ENTRY(cpu_v6_proc_fin)
120 * to what would be the reset vector.
122 * - loc - location to jump to for soft reset
124 - * It is assumed that:
128 diff -Nurp kernel-2.6.28-20101501+0m5/arch/arm/mm/proc-v7.S kernel-2.6.28-20103103+0m5/arch/arm/mm/proc-v7.S
129 --- kernel-2.6.28-20101501+0m5/arch/arm/mm/proc-v7.S 2008-12-25 00:26:37.000000000 +0100
130 +++ kernel-2.6.28-20103103+0m5/arch/arm/mm/proc-v7.S 2012-12-16 13:35:56.028308851 +0100
131 @@ -28,7 +28,14 @@ ENTRY(cpu_v7_proc_init)
132 ENDPROC(cpu_v7_proc_init)
134 ENTRY(cpu_v7_proc_fin)
137 + cpsid if @ disable interrupts
138 + bl v7_flush_kern_cache_all
139 + mrc p15, 0, r0, c1, c0, 0 @ ctrl register
140 + bic r0, r0, #0x1000 @ ...i............
141 + bic r0, r0, #0x0006 @ .............ca.
142 + mcr p15, 0, r0, c1, c0, 0 @ disable caches
144 ENDPROC(cpu_v7_proc_fin)
147 @@ -39,8 +46,6 @@ ENDPROC(cpu_v7_proc_fin)
148 * to what would be the reset vector.
150 * - loc - location to jump to for soft reset
152 - * It is assumed that:
156 diff -Nurp kernel-2.6.28-20101501+0m5/block/cfq-iosched.c kernel-2.6.28-20103103+0m5/block/cfq-iosched.c
157 --- kernel-2.6.28-20101501+0m5/block/cfq-iosched.c 2008-12-25 00:26:37.000000000 +0100
158 +++ kernel-2.6.28-20103103+0m5/block/cfq-iosched.c 2012-12-16 13:35:56.028308851 +0100
159 @@ -84,6 +84,11 @@ struct cfq_data {
161 struct cfq_rb_root service_tree;
162 unsigned int busy_queues;
164 + * Used to track any pending rt requests so we can pre-empt current
165 + * non-RT cfqq in service when this value is non-zero.
167 + unsigned int busy_rt_queues;
171 @@ -155,6 +160,7 @@ struct cfq_queue {
173 unsigned long slice_end;
175 + unsigned int slice_dispatch;
177 /* pending metadata requests */
179 @@ -171,13 +177,12 @@ struct cfq_queue {
180 enum cfqq_state_flags {
181 CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
182 CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
183 + CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
184 CFQ_CFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
185 CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
186 - CFQ_CFQQ_FLAG_must_dispatch, /* must dispatch, even if expired */
187 CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
188 CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
189 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
190 - CFQ_CFQQ_FLAG_queue_new, /* queue never been serviced */
191 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
192 CFQ_CFQQ_FLAG_sync, /* synchronous queue */
194 @@ -198,13 +203,12 @@ static inline int cfq_cfqq_##name(const
197 CFQ_CFQQ_FNS(wait_request);
198 +CFQ_CFQQ_FNS(must_dispatch);
199 CFQ_CFQQ_FNS(must_alloc);
200 CFQ_CFQQ_FNS(must_alloc_slice);
201 -CFQ_CFQQ_FNS(must_dispatch);
202 CFQ_CFQQ_FNS(fifo_expire);
203 CFQ_CFQQ_FNS(idle_window);
204 CFQ_CFQQ_FNS(prio_changed);
205 -CFQ_CFQQ_FNS(queue_new);
206 CFQ_CFQQ_FNS(slice_new);
209 @@ -562,6 +566,8 @@ static void cfq_add_cfqq_rr(struct cfq_d
210 BUG_ON(cfq_cfqq_on_rr(cfqq));
211 cfq_mark_cfqq_on_rr(cfqq);
213 + if (cfq_class_rt(cfqq))
214 + cfqd->busy_rt_queues++;
216 cfq_resort_rr_list(cfqd, cfqq);
218 @@ -581,6 +587,8 @@ static void cfq_del_cfqq_rr(struct cfq_d
220 BUG_ON(!cfqd->busy_queues);
222 + if (cfq_class_rt(cfqq))
223 + cfqd->busy_rt_queues--;
227 @@ -765,10 +773,15 @@ static void __cfq_set_active_queue(struc
229 cfq_log_cfqq(cfqd, cfqq, "set_active");
231 + cfqq->slice_dispatch = 0;
233 + cfq_clear_cfqq_wait_request(cfqq);
234 + cfq_clear_cfqq_must_dispatch(cfqq);
235 cfq_clear_cfqq_must_alloc_slice(cfqq);
236 cfq_clear_cfqq_fifo_expire(cfqq);
237 cfq_mark_cfqq_slice_new(cfqq);
238 - cfq_clear_cfqq_queue_new(cfqq);
240 + del_timer(&cfqd->idle_slice_timer);
243 cfqd->active_queue = cfqq;
244 @@ -786,7 +799,6 @@ __cfq_slice_expired(struct cfq_data *cfq
245 if (cfq_cfqq_wait_request(cfqq))
246 del_timer(&cfqd->idle_slice_timer);
248 - cfq_clear_cfqq_must_dispatch(cfqq);
249 cfq_clear_cfqq_wait_request(cfqq);
252 @@ -915,7 +927,6 @@ static void cfq_arm_slice_timer(struct c
253 (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
256 - cfq_mark_cfqq_must_dispatch(cfqq);
257 cfq_mark_cfqq_wait_request(cfqq);
260 @@ -1001,10 +1012,24 @@ static struct cfq_queue *cfq_select_queu
262 * The active queue has run out of time, expire it and select new.
264 - if (cfq_slice_used(cfqq))
265 + if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
269 + * If we have a RT cfqq waiting, then we pre-empt the current non-rt
272 + if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
274 + * We simulate this as cfqq timed out so that it gets to bank
275 + * the remainder of its time slice.
277 + cfq_log_cfqq(cfqd, cfqq, "preempt");
278 + cfq_slice_expired(cfqd, 1);
283 * The active queue has requests and isn't expired, allow it to
286 @@ -1030,59 +1055,6 @@ keep_queue:
291 - * Dispatch some requests from cfqq, moving them to the request queue
295 -__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
298 - int dispatched = 0;
300 - BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
303 - struct request *rq;
306 - * follow expired path, else get first next available
308 - rq = cfq_check_fifo(cfqq);
310 - rq = cfqq->next_rq;
313 - * finally, insert request into driver dispatch list
315 - cfq_dispatch_insert(cfqd->queue, rq);
319 - if (!cfqd->active_cic) {
320 - atomic_inc(&RQ_CIC(rq)->ioc->refcount);
321 - cfqd->active_cic = RQ_CIC(rq);
324 - if (RB_EMPTY_ROOT(&cfqq->sort_list))
327 - } while (dispatched < max_dispatch);
330 - * expire an async queue immediately if it has used up its slice. idle
331 - * queue always expire after 1 dispatch round.
333 - if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
334 - dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
335 - cfq_class_idle(cfqq))) {
336 - cfqq->slice_end = jiffies + 1;
337 - cfq_slice_expired(cfqd, 0);
343 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
346 @@ -1116,11 +1088,45 @@ static int cfq_forced_dispatch(struct cf
351 + * Dispatch a request from cfqq, moving them to the request queue
354 +static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
356 + struct request *rq;
358 + BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
361 + * follow expired path, else get first next available
363 + rq = cfq_check_fifo(cfqq);
365 + rq = cfqq->next_rq;
368 + * insert request into driver dispatch list
370 + cfq_dispatch_insert(cfqd->queue, rq);
372 + if (!cfqd->active_cic) {
373 + struct cfq_io_context *cic = RQ_CIC(rq);
375 + atomic_inc(&cic->ioc->refcount);
376 + cfqd->active_cic = cic;
381 + * Find the cfqq that we need to service and move a request from that to the
384 static int cfq_dispatch_requests(struct request_queue *q, int force)
386 struct cfq_data *cfqd = q->elevator->elevator_data;
387 struct cfq_queue *cfqq;
389 + unsigned int max_dispatch;
391 if (!cfqd->busy_queues)
393 @@ -1128,33 +1134,63 @@ static int cfq_dispatch_requests(struct
395 return cfq_forced_dispatch(cfqd);
398 - while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
400 + cfqq = cfq_select_queue(cfqd);
404 - max_dispatch = cfqd->cfq_quantum;
406 + * If this is an async queue and we have sync IO in flight, let it wait
408 + if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
411 + max_dispatch = cfqd->cfq_quantum;
412 + if (cfq_class_idle(cfqq))
416 + * Does this cfqq already have too much IO in flight?
418 + if (cfqq->dispatched >= max_dispatch) {
420 + * idle queue must always only have a single IO in flight
422 if (cfq_class_idle(cfqq))
426 - if (cfqq->dispatched >= max_dispatch) {
427 - if (cfqd->busy_queues > 1)
429 - if (cfqq->dispatched >= 4 * max_dispatch)
433 + * We have other queues, don't allow more IO from this one
435 + if (cfqd->busy_queues > 1)
438 - if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
441 + * we are the only queue, allow up to 4 times of 'quantum'
443 + if (cfqq->dispatched >= 4 * max_dispatch)
447 - cfq_clear_cfqq_must_dispatch(cfqq);
448 - cfq_clear_cfqq_wait_request(cfqq);
449 - del_timer(&cfqd->idle_slice_timer);
451 + * Dispatch a request from this cfqq
453 + cfq_dispatch_request(cfqd, cfqq);
454 + cfqq->slice_dispatch++;
455 + cfq_clear_cfqq_must_dispatch(cfqq);
457 - dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
459 + * expire an async queue immediately if it has used up its slice. idle
460 + * queue always expire after 1 dispatch round.
462 + if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
463 + cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
464 + cfq_class_idle(cfqq))) {
465 + cfqq->slice_end = jiffies + 1;
466 + cfq_slice_expired(cfqd, 0);
469 - cfq_log(cfqd, "dispatched=%d", dispatched);
471 + cfq_log(cfqd, "dispatched a request");
476 @@ -1318,7 +1354,15 @@ static void cfq_exit_single_io_context(s
479 spin_lock_irqsave(q->queue_lock, flags);
480 - __cfq_exit_single_io_context(cfqd, cic);
483 + * Ensure we get a fresh copy of the ->key to prevent
484 + * race between exiting task and queue
486 + smp_read_barrier_depends();
488 + __cfq_exit_single_io_context(cfqd, cic);
490 spin_unlock_irqrestore(q->queue_lock, flags);
493 @@ -1472,7 +1516,6 @@ retry:
496 cfq_mark_cfqq_prio_changed(cfqq);
497 - cfq_mark_cfqq_queue_new(cfqq);
499 cfq_init_prio_data(cfqq, ioc);
501 @@ -1797,6 +1840,12 @@ cfq_should_preempt(struct cfq_data *cfqd
502 if (rq_is_meta(rq) && !cfqq->meta_pending)
506 + * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
508 + if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
511 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
514 @@ -1853,23 +1902,28 @@ cfq_rq_enqueued(struct cfq_data *cfqd, s
516 if (cfqq == cfqd->active_queue) {
518 - * if we are waiting for a request for this queue, let it rip
519 - * immediately and flag that we must not expire this queue
521 + * Remember that we saw a request from this process, but
522 + * don't start queuing just yet. Otherwise we risk seeing lots
523 + * of tiny requests, because we disrupt the normal plugging
524 + * and merging. If the request is already larger than a single
525 + * page, let it rip immediately. For that case we assume that
526 + * merging is already done.
528 if (cfq_cfqq_wait_request(cfqq)) {
529 + if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE) {
530 + del_timer(&cfqd->idle_slice_timer);
531 + blk_start_queueing(cfqd->queue);
533 cfq_mark_cfqq_must_dispatch(cfqq);
534 - del_timer(&cfqd->idle_slice_timer);
535 - blk_start_queueing(cfqd->queue);
537 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
539 * not the active queue - expire current slice if it is
540 * idle and has expired it's mean thinktime or this new queue
541 - * has some old slice time left and is of higher priority
542 + * has some old slice time left and is of higher priority or
543 + * this new queue is RT and the current one is BE
545 cfq_preempt_queue(cfqd, cfqq);
546 - cfq_mark_cfqq_must_dispatch(cfqq);
547 blk_start_queueing(cfqd->queue);
550 @@ -2129,6 +2183,12 @@ static void cfq_idle_slice_timer(unsigne
554 + * We saw a request before the queue expired, let it through
556 + if (cfq_cfqq_must_dispatch(cfqq))
562 if (cfq_slice_used(cfqq))
563 @@ -2144,10 +2204,8 @@ static void cfq_idle_slice_timer(unsigne
565 * not expired and it has a request pending, let it dispatch
567 - if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
568 - cfq_mark_cfqq_must_dispatch(cfqq);
569 + if (!RB_EMPTY_ROOT(&cfqq->sort_list))
574 cfq_slice_expired(cfqd, timed_out);
575 diff -Nurp kernel-2.6.28-20101501+0m5/drivers/dsp/bridge/rmgr/drv.c kernel-2.6.28-20103103+0m5/drivers/dsp/bridge/rmgr/drv.c
576 --- kernel-2.6.28-20101501+0m5/drivers/dsp/bridge/rmgr/drv.c 2012-12-16 13:29:16.884315037 +0100
577 +++ kernel-2.6.28-20103103+0m5/drivers/dsp/bridge/rmgr/drv.c 2012-12-16 13:35:56.028308851 +0100
578 @@ -517,11 +517,12 @@ DSP_STATUS DRV_ProcFreeDMMRes(HANDLE hPC
580 pDMMList = pDMMList->next;
581 if (pDMMRes->dmmAllocated) {
582 - status = PROC_UnMap(pDMMRes->hProcessor,
583 - (void *)pDMMRes->ulDSPResAddr, pCtxt);
584 - status = PROC_UnReserveMemory(pDMMRes->hProcessor,
585 - (void *)pDMMRes->ulDSPResAddr);
586 - pDMMRes->dmmAllocated = 0;
587 + /* PROC_UnMap frees pDMMRes */
588 + void *processor = pDMMRes->hProcessor;
589 + void *map_addr = (void*)pDMMRes->ulDSPAddr;
590 + void *rsv_addr = (void*)pDMMRes->ulDSPResAddr;
591 + status = PROC_UnMap(processor, map_addr, pCtxt);
592 + status = PROC_UnReserveMemory(processor, rsv_addr);
596 diff -Nurp kernel-2.6.28-20101501+0m5/drivers/dsp/bridge/rmgr/proc.c kernel-2.6.28-20103103+0m5/drivers/dsp/bridge/rmgr/proc.c
597 --- kernel-2.6.28-20101501+0m5/drivers/dsp/bridge/rmgr/proc.c 2012-12-16 13:29:16.888315037 +0100
598 +++ kernel-2.6.28-20103103+0m5/drivers/dsp/bridge/rmgr/proc.c 2012-12-16 13:35:56.032308853 +0100
599 @@ -750,6 +750,7 @@ static int memory_sync_vma(unsigned long
607 diff -Nurp kernel-2.6.28-20101501+0m5/drivers/i2c/chips/lis302dl.c kernel-2.6.28-20103103+0m5/drivers/i2c/chips/lis302dl.c
608 --- kernel-2.6.28-20101501+0m5/drivers/i2c/chips/lis302dl.c 2012-12-16 13:29:16.928315036 +0100
609 +++ kernel-2.6.28-20103103+0m5/drivers/i2c/chips/lis302dl.c 2012-12-16 13:35:56.032308853 +0100
611 # define LIS302_CTRL1_Y (1 << 1)
612 # define LIS302_CTRL1_X (1 << 0)
613 #define LIS302_CTRL_2 0x21
614 +# define LIS302_CTRL2_BOOT (1 << 6)
615 #define LIS302_CTRL_3 0x22
616 # define LIS302_CTRL3_GND 0x00
617 # define LIS302_CTRL3_FF_WU_1 0x01
618 @@ -161,8 +162,13 @@ static int lis302dl_configure(struct i2c
623 - /* Control High Pass filter selection. not used */
625 + * Boot is used to refresh internal registers
626 + * Control High Pass filter selection. not used
628 + ret = lis302dl_write(c, LIS302_CTRL_2, LIS302_CTRL2_BOOT);
633 * Interrupt CTRL register. One interrupt pin is used for
634 diff -Nurp kernel-2.6.28-20101501+0m5/drivers/leds/leds-lp5523.c kernel-2.6.28-20103103+0m5/drivers/leds/leds-lp5523.c
635 --- kernel-2.6.28-20101501+0m5/drivers/leds/leds-lp5523.c 2012-12-16 13:28:34.784315691 +0100
636 +++ kernel-2.6.28-20103103+0m5/drivers/leds/leds-lp5523.c 2012-12-16 13:35:56.032308853 +0100
638 #include <linux/wait.h>
639 #include <linux/leds.h>
640 #include <linux/leds-lp5523.h>
641 +#include <linux/workqueue.h>
643 #define LP5523_DRIVER_NAME "lp5523"
644 #define LP5523_REG_ENABLE 0x00
645 @@ -120,6 +121,8 @@ struct lp5523_led {
648 struct led_classdev cdev;
649 + struct work_struct brightness_work;
654 @@ -161,6 +164,8 @@ static int lp5523_load_program(struct lp
655 static void lp5523_work(struct work_struct *work);
656 static irqreturn_t lp5523_irq(int irq, void *_chip);
658 +static void lp5523_led_brightness_work(struct work_struct *work);
661 static int lp5523_write(struct i2c_client *client, u8 reg, u8 value)
663 @@ -476,6 +481,16 @@ static void lp5523_set_brightness(struct
664 enum led_brightness brightness)
666 struct lp5523_led *led = cdev_to_led(cdev);
667 + led->brightness = (u8)brightness;
669 + schedule_work(&led->brightness_work);
672 +static void lp5523_led_brightness_work(struct work_struct *work)
674 + struct lp5523_led *led = container_of(work,
677 struct lp5523_chip *chip = led_to_lp5523(led);
678 struct i2c_client *client = chip->client;
680 @@ -483,7 +498,7 @@ static void lp5523_set_brightness(struct
683 LP5523_REG_LED_PWM_BASE + led->led_nr,
687 mutex_unlock(&chip->lock);
689 @@ -907,6 +922,8 @@ static int lp5523_probe(struct i2c_clien
690 dev_err(&client->dev, "error initializing leds\n");
693 + INIT_WORK(&(chip->leds[i].brightness_work),
694 + lp5523_led_brightness_work);
697 ret = lp5523_register_sysfs(client);
698 @@ -916,8 +933,10 @@ static int lp5523_probe(struct i2c_clien
702 - for (i = 0; i < pdata->num_leds; i++)
703 + for (i = 0; i < pdata->num_leds; i++) {
704 led_classdev_unregister(&chip->leds[i].cdev);
705 + cancel_work_sync(&chip->leds[i].brightness_work);
710 @@ -931,8 +950,10 @@ static int lp5523_remove(struct i2c_clie
712 lp5523_unregister_sysfs(client);
714 - for (i = 0; i < chip->num_leds; i++)
715 + for (i = 0; i < chip->num_leds; i++) {
716 led_classdev_unregister(&chip->leds[i].cdev);
717 + cancel_work_sync(&chip->leds[i].brightness_work);
722 diff -Nurp kernel-2.6.28-20101501+0m5/drivers/media/radio/radio-si4713.c kernel-2.6.28-20103103+0m5/drivers/media/radio/radio-si4713.c
723 --- kernel-2.6.28-20101501+0m5/drivers/media/radio/radio-si4713.c 2012-12-16 13:28:34.784315691 +0100
724 +++ kernel-2.6.28-20103103+0m5/drivers/media/radio/radio-si4713.c 2012-12-16 13:35:56.032308853 +0100
726 /* module parameters */
727 static int radio_nr = -1; /* radio device minor (-1 ==> auto assign) */
729 +/* properties lock for write operations */
730 +static int config_locked;
732 +/* saved power levels */
733 +static unsigned int max_pl;
734 +static unsigned int min_pl;
736 +/* structure for pid registration */
739 + struct list_head plist;
742 +#define APP_MAX_NUM 2
744 +static int pid_count;
745 +static LIST_HEAD(pid_list_head);
746 +static struct si4713_device *si4713_dev;
750 * Read and write functions
751 @@ -167,6 +186,37 @@ static DEVICE_ATTR(prop, S_IRUGO | S_IWU
752 si4713_##prop##_write);
755 + * Config lock property
757 +static ssize_t si4713_lock_write(struct device *dev,
758 + struct device_attribute *attr,
767 + sscanf(buf, "%d", &l);
775 +static ssize_t si4713_lock_read(struct device *dev,
776 + struct device_attribute *attr,
779 + return sprintf(buf, "%d\n", config_locked);
782 +static DEVICE_ATTR(lock, S_IRUGO | S_IWUSR, si4713_lock_read,
783 + si4713_lock_write);
786 * Power level property
788 /* power_level (rw) 88 - 115 or 0 */
789 @@ -179,6 +229,9 @@ static ssize_t si4713_power_level_write(
799 @@ -320,6 +373,7 @@ DEFINE_SYSFS_PROPERTY(tone_off_time, uns
800 value > MAX_TONE_OFF_TIME)
802 static struct attribute *attrs[] = {
803 + &dev_attr_lock.attr,
804 &dev_attr_power_level.attr,
805 &dev_attr_antenna_capacitor.attr,
806 &dev_attr_rds_pi.attr,
807 @@ -366,13 +420,118 @@ static irqreturn_t si4713_handler(int ir
811 +static int register_pid(pid_t pid)
813 + struct pid_list *pitem;
815 + list_for_each_entry(pitem, &pid_list_head, plist) {
816 + if (pitem->pid == pid)
820 + pitem = kmalloc(sizeof(struct pid_list), GFP_KERNEL);
827 + list_add(&(pitem->plist), &pid_list_head);
833 +static int unregister_pid(pid_t pid)
835 + struct pid_list *pitem, *n;
837 + list_for_each_entry_safe(pitem, n, &pid_list_head, plist) {
838 + if (pitem->pid == pid) {
839 + list_del(&(pitem->plist));
850 +static int si4713_priv_ioctl(struct inode *inode, struct file *file,
851 + unsigned int cmd, unsigned long arg)
856 + if (cmd != LOCK_LOW_POWER && cmd != RELEASE_LOW_POWER)
857 + return video_ioctl2(inode, file, cmd, arg);
859 + pl = si4713_get_power_level(si4713_dev);
866 + if (copy_from_user(&pow, (void __user *)arg, sizeof(pow))) {
871 + if (cmd == LOCK_LOW_POWER) {
873 + if (pid_count == APP_MAX_NUM) {
878 + if (pid_count == 0) {
883 + /* Set max possible power level */
889 + rval = register_pid(current->pid);
894 + /* Lower min power level if asked */
900 + } else { /* RELEASE_LOW_POWER */
901 + rval = unregister_pid(current->pid);
906 + if (pid_count == 0) {
911 + rval = si4713_set_power_level(si4713_dev, pow);
917 * si4713_fops - file operations interface
919 static const struct file_operations si4713_fops = {
920 .owner = THIS_MODULE,
922 - .ioctl = video_ioctl2,
923 + .ioctl = si4713_priv_ioctl,
924 .compat_ioctl = v4l_compat_ioctl32,
927 @@ -747,6 +906,9 @@ static int si4713_i2c_driver_probe(struc
931 + /* save to global pointer for it to be accessible from ioctl() call */
937 diff -Nurp kernel-2.6.28-20101501+0m5/drivers/media/radio/radio-si4713.h kernel-2.6.28-20103103+0m5/drivers/media/radio/radio-si4713.h
938 --- kernel-2.6.28-20101501+0m5/drivers/media/radio/radio-si4713.h 2012-12-16 13:28:30.960315749 +0100
939 +++ kernel-2.6.28-20103103+0m5/drivers/media/radio/radio-si4713.h 2012-12-16 13:35:56.032308853 +0100
941 #define SI4713_I2C_ADDR_BUSEN_HIGH 0x63
942 #define SI4713_I2C_ADDR_BUSEN_LOW 0x11
944 +#define LOCK_LOW_POWER _IOW('v', BASE_VIDIOC_PRIVATE + 0, unsigned int)
945 +#define RELEASE_LOW_POWER _IOW('v', BASE_VIDIOC_PRIVATE + 1, unsigned int)
948 * Platform dependent definition
950 diff -Nurp kernel-2.6.28-20101501+0m5/drivers/media/video/omap34xxcam.c kernel-2.6.28-20103103+0m5/drivers/media/video/omap34xxcam.c
951 --- kernel-2.6.28-20101501+0m5/drivers/media/video/omap34xxcam.c 2012-12-16 13:29:16.928315036 +0100
952 +++ kernel-2.6.28-20103103+0m5/drivers/media/video/omap34xxcam.c 2012-12-16 13:35:56.032308853 +0100
953 @@ -1833,6 +1833,7 @@ static int omap34xxcam_release(struct in
954 struct omap34xxcam_videodev *vdev = fh->vdev;
955 struct device *isp = vdev->cam->isp;
959 if (omap34xxcam_daemon_release(vdev, file))
961 @@ -1844,6 +1845,7 @@ static int omap34xxcam_release(struct in
962 omap34xxcam_slave_power_set(vdev, V4L2_POWER_STANDBY,
963 OMAP34XXCAM_SLAVE_POWER_ALL);
964 vdev->streaming = NULL;
968 if (atomic_dec_return(&vdev->users) == 0) {
969 @@ -1853,6 +1855,10 @@ static int omap34xxcam_release(struct in
971 mutex_unlock(&vdev->mutex);
974 + omap34xxcam_daemon_req_hw_reconfig(
975 + vdev, OMAP34XXCAM_DAEMON_HW_RECONFIG_STREAMOFF);
978 file->private_data = NULL;
980 diff -Nurp kernel-2.6.28-20101501+0m5/drivers/mmc/host/omap_hsmmc.c kernel-2.6.28-20103103+0m5/drivers/mmc/host/omap_hsmmc.c
981 --- kernel-2.6.28-20101501+0m5/drivers/mmc/host/omap_hsmmc.c 2012-12-16 13:30:14.172314148 +0100
982 +++ kernel-2.6.28-20103103+0m5/drivers/mmc/host/omap_hsmmc.c 2012-12-16 13:35:56.036308854 +0100
984 /* Timeouts for entering power saving states on inactivity, msec */
985 #define OMAP_MMC_DISABLED_TIMEOUT 100
986 #define OMAP_MMC_SLEEP_TIMEOUT 1000
987 +#define OMAP_MMC_OFF_NOSLP_TIMEOUT 3000
988 #define OMAP_MMC_OFF_TIMEOUT 8000
991 @@ -1249,21 +1250,21 @@ static void omap_hsmmc_conf_bus_power(st
994 * Dynamic power saving handling, FSM:
995 - * ENABLED -> DISABLED -> CARDSLEEP / REGSLEEP -> OFF
996 - * ^___________| | |
997 - * |______________________|______________________|
998 + * ENABLED -> DISABLED -> EXTDISABLED / CARDSLEEP / REGSLEEP -> OFF
999 + * ^___________| | |
1000 + * |____________________________________|______________________|
1002 - * ENABLED: mmc host is fully functional
1003 - * DISABLED: fclk is off
1004 - * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep
1005 - * REGSLEEP: fclk is off, voltage regulator is asleep
1006 - * OFF: fclk is off, voltage regulator is off
1007 + * ENABLED: mmc host is fully functional
1008 + * (EXT)DISABLED: fclk is off
1009 + * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep
1010 + * REGSLEEP: fclk is off, voltage regulator is asleep
1011 + * OFF: fclk is off, voltage regulator is off
1013 * Transition handlers return the timeout for the next state transition
1014 * or negative error.
1017 -enum {ENABLED = 0, DISABLED, CARDSLEEP, REGSLEEP, OFF};
1018 +enum {ENABLED = 0, DISABLED, EXTDISABLED, CARDSLEEP, REGSLEEP, OFF};
1020 /* Handler for [ENABLED -> DISABLED] transition */
1021 static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host)
1022 @@ -1300,7 +1301,21 @@ static int omap_hsmmc_full_sleep(struct
1026 -/* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */
1027 +/* Big SD cards (16GiB) are prohibited from
1028 + switching voltage regulator to asleep
1029 + because of high current consumption */
1030 +static int omap_hsmmc_support_sleep(struct mmc_host *mmc)
1032 + if (!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
1033 + ((u64)mmc->card->csd.capacity << mmc->card->csd.read_blkbits) >
1034 + 14ULL * 1024 * 1024 * 1024) {
1041 +/* Handler for [DISABLED -> EXTDISABLED / REGSLEEP / CARDSLEEP] transition */
1042 static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host)
1044 int err, new_state, sleep;
1045 @@ -1319,12 +1334,12 @@ static int omap_hsmmc_disabled_to_sleep(
1047 new_state = CARDSLEEP;
1049 - new_state = REGSLEEP;
1050 + new_state = omap_hsmmc_support_sleep(host->mmc) ? REGSLEEP : EXTDISABLED;
1053 sleep = omap_hsmmc_full_sleep(host->mmc->card) &&
1054 (new_state == CARDSLEEP);
1055 - if (mmc_slot(host).set_sleep)
1056 + if (mmc_slot(host).set_sleep && new_state != EXTDISABLED)
1057 mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0,
1059 /* FIXME: turn off bus power and perhaps interrupts too */
1060 @@ -1334,18 +1349,20 @@ static int omap_hsmmc_disabled_to_sleep(
1061 mmc_release_host(host->mmc);
1063 dev_dbg(mmc_dev(host->mmc), "DISABLED -> %s\n",
1064 - host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
1065 + host->dpm_state == CARDSLEEP ? "CARDSLEEP" :
1066 + host->dpm_state == REGSLEEP ? "REGSLEEP" : "EXTDISABLED");
1068 if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
1069 mmc_slot(host).card_detect ||
1070 (mmc_slot(host).get_cover_state &&
1071 mmc_slot(host).get_cover_state(host->dev, host->slot_id)))
1072 - return msecs_to_jiffies(OMAP_MMC_OFF_TIMEOUT);
1073 + return msecs_to_jiffies(new_state == EXTDISABLED ?
1074 + OMAP_MMC_OFF_NOSLP_TIMEOUT : OMAP_MMC_OFF_TIMEOUT);
1079 -/* Handler for [REGSLEEP / CARDSLEEP -> OFF] transition */
1080 +/* Handler for [EXTDISABLED / REGSLEEP / CARDSLEEP -> OFF] transition */
1081 static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host)
1083 if (!mmc_try_claim_host(host->mmc))
1084 @@ -1364,7 +1381,8 @@ static int omap_hsmmc_sleep_to_off(struc
1085 host->power_mode = MMC_POWER_OFF;
1087 dev_dbg(mmc_dev(host->mmc), "%s -> OFF\n",
1088 - host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
1089 + host->dpm_state == CARDSLEEP ? "CARDSLEEP" :
1090 + host->dpm_state == REGSLEEP ? "REGSLEEP" : "EXTDISABLED");
1092 host->dpm_state = OFF;
1094 @@ -1405,14 +1423,15 @@ static int omap_hsmmc_sleep_to_enabled(s
1095 omap_hsmmc_context_restore(host);
1096 asleep = omap_hsmmc_full_sleep(host->mmc->card) &&
1097 (host->dpm_state == CARDSLEEP);
1098 - if (mmc_slot(host).set_sleep)
1099 + if (mmc_slot(host).set_sleep && host->dpm_state != EXTDISABLED)
1100 mmc_slot(host).set_sleep(host->dev, host->slot_id, 0,
1102 if (mmc_card_can_sleep(host->mmc))
1103 mmc_card_awake(host->mmc);
1105 dev_dbg(mmc_dev(host->mmc), "%s -> ENABLED\n",
1106 - host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
1107 + host->dpm_state == CARDSLEEP ? "CARDSLEEP" :
1108 + host->dpm_state == REGSLEEP ? "REGSLEEP" : "EXTDISABLED");
1110 if (host->pdata->set_pm_constraints)
1111 host->pdata->set_pm_constraints(host->dev, 1);
1112 @@ -1454,6 +1473,7 @@ static int omap_hsmmc_enable(struct mmc_
1113 switch (host->dpm_state) {
1115 return omap_hsmmc_disabled_to_enabled(host);
1119 return omap_hsmmc_sleep_to_enabled(host);
1120 @@ -1484,6 +1504,7 @@ static int omap_hsmmc_disable(struct mmc
1123 return omap_hsmmc_disabled_to_sleep(host);
1127 return omap_hsmmc_sleep_to_off(host);
1128 diff -Nurp kernel-2.6.28-20101501+0m5/drivers/net/wireless/wl12xx/wl1251_acx.c kernel-2.6.28-20103103+0m5/drivers/net/wireless/wl12xx/wl1251_acx.c
1129 --- kernel-2.6.28-20101501+0m5/drivers/net/wireless/wl12xx/wl1251_acx.c 2012-12-16 13:29:16.928315036 +0100
1130 +++ kernel-2.6.28-20103103+0m5/drivers/net/wireless/wl12xx/wl1251_acx.c 2012-12-16 13:35:56.036308854 +0100
1131 @@ -910,7 +910,7 @@ int wl1251_acx_tsf_info(struct wl1251 *w
1134 *mactime = tsf_info->current_tsf_lsb |
1135 - (tsf_info->current_tsf_msb << 31);
1136 + ((unsigned long long) tsf_info->current_tsf_msb << 32);
1140 diff -Nurp kernel-2.6.28-20101501+0m5/drivers/net/wireless/wl12xx/wl1251_cmd.c kernel-2.6.28-20103103+0m5/drivers/net/wireless/wl12xx/wl1251_cmd.c
1141 --- kernel-2.6.28-20101501+0m5/drivers/net/wireless/wl12xx/wl1251_cmd.c 2012-12-16 13:28:34.812315688 +0100
1142 +++ kernel-2.6.28-20103103+0m5/drivers/net/wireless/wl12xx/wl1251_cmd.c 2012-12-16 13:35:56.036308854 +0100
1143 @@ -242,7 +242,7 @@ int wl1251_cmd_data_path(struct wl1251 *
1145 wl1251_error("tx %s cmd for channel %d failed",
1146 enable ? "start" : "stop", channel);
1151 wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
1152 diff -Nurp kernel-2.6.28-20101501+0m5/drivers/usb/musb/musb_core.c kernel-2.6.28-20103103+0m5/drivers/usb/musb/musb_core.c
1153 --- kernel-2.6.28-20101501+0m5/drivers/usb/musb/musb_core.c 2012-12-16 13:29:04.852315222 +0100
1154 +++ kernel-2.6.28-20103103+0m5/drivers/usb/musb/musb_core.c 2012-12-16 13:35:59.224308804 +0100
1155 @@ -297,28 +297,23 @@ static int musb_charger_detect(struct mu
1160 - /* REVISIT: This code works only with dedicated chargers!
1161 - * When support for HOST/HUB chargers is added, don't
1164 + /* enable interrupts */
1165 + musb_writeb(musb->mregs, MUSB_INTRUSBE, ctx.intrusbe);
1167 + /* Make sure the communication starts normally */
1168 + r = musb_readb(musb->mregs, MUSB_POWER);
1169 + musb_writeb(musb->mregs, MUSB_POWER,
1170 + r | MUSB_POWER_RESUME);
1172 + musb_writeb(musb->mregs, MUSB_POWER,
1173 + r & ~MUSB_POWER_RESUME);
1174 + if (vdat && musb->xceiv->state != OTG_STATE_B_IDLE) {
1176 /* Regulators off */
1177 otg_set_suspend(musb->xceiv, 1);
1178 - musb->is_charger = 1;
1180 - /* enable interrupts */
1181 - musb_writeb(musb->mregs, MUSB_INTRUSBE, ctx.intrusbe);
1183 - /* Make sure the communication starts normally */
1184 - r = musb_readb(musb->mregs, MUSB_POWER);
1185 - musb_writeb(musb->mregs, MUSB_POWER,
1186 - r | MUSB_POWER_RESUME);
1188 - musb_writeb(musb->mregs, MUSB_POWER,
1189 - r & ~MUSB_POWER_RESUME);
1192 + musb->is_charger = vdat;
1196 diff -Nurp kernel-2.6.28-20101501+0m5/include/linux/sched.h kernel-2.6.28-20103103+0m5/include/linux/sched.h
1197 --- kernel-2.6.28-20101501+0m5/include/linux/sched.h 2012-12-16 13:28:34.848315688 +0100
1198 +++ kernel-2.6.28-20103103+0m5/include/linux/sched.h 2012-12-16 13:35:56.036308854 +0100
1199 @@ -1665,11 +1665,11 @@ extern void wake_up_idle_cpu(int cpu);
1200 static inline void wake_up_idle_cpu(int cpu) { }
1203 +extern unsigned int sysctl_sched_child_runs_first;
1204 #ifdef CONFIG_SCHED_DEBUG
1205 extern unsigned int sysctl_sched_latency;
1206 extern unsigned int sysctl_sched_min_granularity;
1207 extern unsigned int sysctl_sched_wakeup_granularity;
1208 -extern unsigned int sysctl_sched_child_runs_first;
1209 extern unsigned int sysctl_sched_features;
1210 extern unsigned int sysctl_sched_migration_cost;
1211 extern unsigned int sysctl_sched_nr_migrate;
1212 diff -Nurp kernel-2.6.28-20101501+0m5/include/linux/swap.h kernel-2.6.28-20103103+0m5/include/linux/swap.h
1213 --- kernel-2.6.28-20101501+0m5/include/linux/swap.h 2012-12-16 13:28:34.848315688 +0100
1214 +++ kernel-2.6.28-20103103+0m5/include/linux/swap.h 2012-12-16 13:35:56.036308854 +0100
1215 @@ -130,6 +130,17 @@ enum {
1216 #define SWAP_MAP_MAX 0x7fff
1217 #define SWAP_MAP_BAD 0x8000
1219 +#define SWAP_GAP_TREE_SIZE 10
1220 +#define SWAP_GAP_RESCAN_TIMEO_MSEC 2000
1221 +#define swap_gap_len(gap) ((gap)->end - (gap)->next)
1222 +#define swap_gap_rb_entry(node) rb_entry(node, struct swap_gap_node, rb_node)
1223 +/* Struct to store gaps info */
1224 +struct swap_gap_node {
1225 + struct rb_node rb_node;
1226 + unsigned int next;
1231 * The in-memory structure used to track swap areas.
1233 @@ -157,6 +168,9 @@ struct swap_info_struct {
1234 unsigned int gap_next;
1235 unsigned int gap_end;
1236 unsigned int gaps_exist;
1237 + struct rb_root gaps_tree;
1238 + struct swap_gap_node *gap_pool_arr;
1239 + unsigned long gap_last_scan;
1240 unsigned int lowest_bit;
1241 unsigned int highest_bit;
1242 unsigned int cluster_next;
1243 diff -Nurp kernel-2.6.28-20101501+0m5/include/net/bluetooth/sco.h kernel-2.6.28-20103103+0m5/include/net/bluetooth/sco.h
1244 --- kernel-2.6.28-20101501+0m5/include/net/bluetooth/sco.h 2008-12-25 00:26:37.000000000 +0100
1245 +++ kernel-2.6.28-20103103+0m5/include/net/bluetooth/sco.h 2012-12-16 13:35:56.036308854 +0100
1247 #define SCO_DEFAULT_MTU 500
1248 #define SCO_DEFAULT_FLUSH_TO 0xFFFF
1250 -#define SCO_CONN_TIMEOUT (HZ * 40)
1251 +#define SCO_CONN_TIMEOUT (HZ * 25)
1252 #define SCO_DISCONN_TIMEOUT (HZ * 2)
1253 #define SCO_CONN_IDLE_TIMEOUT (HZ * 60)
1255 diff -Nurp kernel-2.6.28-20101501+0m5/kernel/sched_fair.c kernel-2.6.28-20103103+0m5/kernel/sched_fair.c
1256 --- kernel-2.6.28-20101501+0m5/kernel/sched_fair.c 2008-12-25 00:26:37.000000000 +0100
1257 +++ kernel-2.6.28-20103103+0m5/kernel/sched_fair.c 2012-12-16 13:35:56.036308854 +0100
1258 @@ -48,10 +48,10 @@ unsigned int sysctl_sched_min_granularit
1259 static unsigned int sched_nr_latency = 5;
1262 - * After fork, child runs first. (default) If set to 0 then
1263 + * After fork, child runs first. If set to 0 then
1264 * parent will (try to) run first.
1266 -const_debug unsigned int sysctl_sched_child_runs_first = 1;
1267 +unsigned int sysctl_sched_child_runs_first __read_mostly;
1270 * sys_sched_yield() compat mode
1271 diff -Nurp kernel-2.6.28-20101501+0m5/kernel/sysctl.c kernel-2.6.28-20103103+0m5/kernel/sysctl.c
1272 --- kernel-2.6.28-20101501+0m5/kernel/sysctl.c 2008-12-25 00:26:37.000000000 +0100
1273 +++ kernel-2.6.28-20103103+0m5/kernel/sysctl.c 2012-12-16 13:35:56.036308854 +0100
1274 @@ -235,6 +235,14 @@ static int max_wakeup_granularity_ns = N
1277 static struct ctl_table kern_table[] = {
1279 + .ctl_name = CTL_UNNUMBERED,
1280 + .procname = "sched_child_runs_first",
1281 + .data = &sysctl_sched_child_runs_first,
1282 + .maxlen = sizeof(unsigned int),
1284 + .proc_handler = &proc_dointvec,
1286 #ifdef CONFIG_SCHED_DEBUG
1288 .ctl_name = CTL_UNNUMBERED,
1289 @@ -289,14 +297,6 @@ static struct ctl_table kern_table[] = {
1292 .ctl_name = CTL_UNNUMBERED,
1293 - .procname = "sched_child_runs_first",
1294 - .data = &sysctl_sched_child_runs_first,
1295 - .maxlen = sizeof(unsigned int),
1297 - .proc_handler = &proc_dointvec,
1300 - .ctl_name = CTL_UNNUMBERED,
1301 .procname = "sched_features",
1302 .data = &sysctl_sched_features,
1303 .maxlen = sizeof(unsigned int),
1304 diff -Nurp kernel-2.6.28-20101501+0m5/mm/swapfile.c kernel-2.6.28-20103103+0m5/mm/swapfile.c
1305 --- kernel-2.6.28-20101501+0m5/mm/swapfile.c 2012-12-16 13:29:04.852315222 +0100
1306 +++ kernel-2.6.28-20103103+0m5/mm/swapfile.c 2012-12-16 13:35:56.036308854 +0100
1307 @@ -996,11 +996,55 @@ static void drain_mmlist(void)
1308 spin_unlock(&mmlist_lock);
1311 +void gaps_rbtree_insert(struct swap_info_struct *sis,
1312 + struct swap_gap_node *node)
1314 + struct rb_node **p = &sis->gaps_tree.rb_node;
1315 + struct rb_node *parent = NULL;
1316 + struct swap_gap_node *tmp;
1320 + tmp = rb_entry(parent, struct swap_gap_node, rb_node);
1321 + if (swap_gap_len(node) < swap_gap_len(tmp))
1322 + p = &(*p)->rb_left;
1324 + p = &(*p)->rb_right;
1326 + rb_link_node(&node->rb_node, parent, p);
1327 + rb_insert_color(&node->rb_node, &sis->gaps_tree);
1330 +void gaps_rbtree_add(struct swap_info_struct *sis,
1331 + unsigned int next, unsigned int end,
1332 + struct swap_gap_node **gap_min, int *pos)
1334 + struct swap_gap_node *gap_node;
1335 + if (*pos < SWAP_GAP_TREE_SIZE) {
1336 + gap_node = &sis->gap_pool_arr[*pos];
1338 + } else if (swap_gap_len(*gap_min) > end - next) {
1341 + gap_node = *gap_min;
1342 + rb_erase(&gap_node->rb_node, &sis->gaps_tree);
1343 + *gap_min = swap_gap_rb_entry(rb_first(&sis->gaps_tree));
1345 + gap_node->next = next;
1346 + gap_node->end = end;
1347 + if (gap_min && (*gap_min == NULL ||
1348 + swap_gap_len(*gap_min) > swap_gap_len(gap_node)))
1349 + *gap_min = gap_node;
1350 + gaps_rbtree_insert(sis, gap_node);
1353 /* Find the largest sequence of free pages */
1354 int find_gap(struct swap_info_struct *sis)
1356 unsigned i, uninitialized_var(start), uninitialized_var(gap_next);
1357 - unsigned uninitialized_var(gap_end), gap_size = 0;
1358 + unsigned uninitialized_var(gap_end);
1359 + struct swap_gap_node *gap_max, *gap_min = NULL;
1363 spin_unlock(&sis->remap_lock);
1364 @@ -1017,6 +1061,11 @@ int find_gap(struct swap_info_struct *si
1365 mutex_unlock(&sis->remap_mutex);
1368 + if (time_after(jiffies, sis->gap_last_scan +
1369 + msecs_to_jiffies(SWAP_GAP_RESCAN_TIMEO_MSEC)))
1370 + sis->gaps_tree = RB_ROOT;
1371 + if (!RB_EMPTY_ROOT(&sis->gaps_tree))
1373 spin_unlock(&sis->remap_lock);
1376 @@ -1028,11 +1077,7 @@ int find_gap(struct swap_info_struct *si
1378 if (!(sis->swap_remap[i] & 0x80000000))
1380 - if (i - start > gap_size) {
1383 - gap_size = i - start;
1385 + gaps_rbtree_add(sis, start, i - 1, &gap_min, &pos);
1388 if (sis->swap_remap[i] & 0x80000000)
1389 @@ -1043,13 +1088,14 @@ int find_gap(struct swap_info_struct *si
1392 spin_lock(&sis->remap_lock);
1393 - if (in_gap && i - start > gap_size) {
1394 - sis->gap_next = start;
1395 - sis->gap_end = i - 1;
1397 - sis->gap_next = gap_next;
1398 - sis->gap_end = gap_end;
1401 + gaps_rbtree_add(sis, start, i - 1, &gap_min, &pos);
1402 + sis->gap_last_scan = jiffies;
1404 + gap_max = swap_gap_rb_entry(rb_last(&sis->gaps_tree));
1405 + rb_erase(&gap_max->rb_node, &sis->gaps_tree);
1406 + sis->gap_next = gap_max->next;
1407 + sis->gap_end = gap_max->end;
1408 mutex_unlock(&sis->remap_mutex);
1411 @@ -1471,6 +1517,7 @@ asmlinkage long sys_swapoff(const char _
1413 spin_unlock(&swap_lock);
1414 mutex_unlock(&swapon_mutex);
1415 + kfree(p->gap_pool_arr);
1416 vfree(p->swap_remap);
1418 inode = mapping->host;
1419 @@ -1825,6 +1872,14 @@ asmlinkage long sys_swapon(const char __
1423 + p->gap_pool_arr = kmalloc(sizeof(struct swap_gap_node)*
1424 + SWAP_GAP_TREE_SIZE, GFP_KERNEL);
1425 + if (!p->gap_pool_arr) {
1429 + p->gaps_tree = RB_ROOT;
1431 mutex_lock(&swapon_mutex);
1432 spin_lock(&swap_lock);
1433 if (swap_flags & SWAP_FLAG_PREFER)
1434 diff -Nurp kernel-2.6.28-20101501+0m5/net/bluetooth/hci_conn.c kernel-2.6.28-20103103+0m5/net/bluetooth/hci_conn.c
1435 --- kernel-2.6.28-20101501+0m5/net/bluetooth/hci_conn.c 2012-12-16 13:29:04.852315222 +0100
1436 +++ kernel-2.6.28-20103103+0m5/net/bluetooth/hci_conn.c 2012-12-16 13:35:56.036308854 +0100
1437 @@ -375,6 +375,9 @@ struct hci_conn *hci_connect(struct hci_
1439 if (acl->state == BT_CONNECTED &&
1440 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1441 + acl->power_save = 1;
1442 + hci_conn_enter_active_mode(acl);
1444 if (lmp_esco_capable(hdev))
1445 hci_setup_sync(sco, acl->handle);
1447 diff -Nurp kernel-2.6.28-20101501+0m5/net/bluetooth/hci_event.c kernel-2.6.28-20103103+0m5/net/bluetooth/hci_event.c
1448 --- kernel-2.6.28-20101501+0m5/net/bluetooth/hci_event.c 2012-12-16 13:28:34.852315687 +0100
1449 +++ kernel-2.6.28-20103103+0m5/net/bluetooth/hci_event.c 2012-12-16 13:35:56.036308854 +0100
1450 @@ -1056,6 +1056,8 @@ static inline void hci_auth_complete_evt
1453 conn->link_mode |= HCI_LM_AUTH;
1455 + conn->sec_level = BT_SECURITY_LOW;
1457 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1459 @@ -1709,6 +1711,7 @@ static inline void hci_sync_conn_complet
1462 case 0x1c: /* SCO interval rejected */
1463 + case 0x1a: /* Unsupported Remote Feature */
1464 case 0x1f: /* Unspecified error */
1465 if (conn->out && conn->attempt < 2) {
1466 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |