kernel-power v42 -> kernel-bfs
kernel-bfs/kernel-bfs-2.6.28/debian/patches/nokia-20103103+0m5.diff
--- kernel-power-2.6.28.orig/arch/arm/include/asm/cacheflush.h
+++ kernel-power-2.6.28/arch/arm/include/asm/cacheflush.h
@@ -138,16 +138,16 @@
  *     Please note that the implementation of these, and the required
  *     effects are cache-type (VIVT/VIPT/PIPT) specific.
  *
- *     flush_cache_kern_all()
+ *     flush_kern_all()
  *
  *             Unconditionally clean and invalidate the entire cache.
  *
- *     flush_cache_user_mm(mm)
+ *     flush_user_all()
  *
  *             Clean and invalidate all user space cache entries
  *             before a change of page tables.
  *
- *     flush_cache_user_range(start, end, flags)
+ *     flush_user_range(start, end, flags)
  *
  *             Clean and invalidate a range of cache entries in the
  *             specified address space before a change of page tables.
@@ -163,6 +163,20 @@
  *             - start  - virtual start address
  *             - end    - virtual end address
  *
+ *     coherent_user_range(start, end)
+ *
+ *             Ensure coherency between the Icache and the Dcache in the
+ *             region described by start, end.  If you have non-snooping
+ *             Harvard caches, you need to implement this function.
+ *             - start  - virtual start address
+ *             - end    - virtual end address
+ *
+ *     flush_kern_dcache_area(kaddr, size)
+ *
+ *             Ensure that the data held in page is written back.
+ *             - kaddr  - page address
+ *             - size   - region size
+ *
  *     DMA Cache Coherency
  *     ===================
  *
@@ -375,7 +389,7 @@
  * Harvard caches are synchronised for the user space address range.
  * This is used for the ARM private sys_cacheflush system call.
  */
-#define flush_cache_user_range(vma,start,end) \
+#define flush_cache_user_range(start,end) \
        __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
 
 /*
--- kernel-power-2.6.28.orig/arch/arm/kernel/traps.c
+++ kernel-power-2.6.28/arch/arm/kernel/traps.c
@@ -418,7 +418,9 @@
                if (end > vma->vm_end)
                        end = vma->vm_end;
 
-               flush_cache_user_range(vma, start, end);
+               up_read(&mm->mmap_sem);
+               flush_cache_user_range(start, end);
+               return;
        }
        up_read(&mm->mmap_sem);
 }
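
The traps.c hunk drops the now-unused vma argument and releases mmap_sem before the potentially slow flush. This path is reached from userspace through the ARM private cacheflush syscall; a minimal sketch of a caller, assuming a buffer of freshly written code (the helper name is hypothetical):

#include <unistd.h>
#include <sys/syscall.h>

#ifndef __ARM_NR_cacheflush
#define __ARM_NR_cacheflush 0x0f0002    /* __ARM_NR_BASE + 2 */
#endif

/* Sync the I-cache with the D-cache over [start, end) before jumping
 * to code that was just written there; this ends up in the
 * arch/arm/kernel/traps.c path patched above. */
static void flush_code_range(void *start, void *end)
{
        syscall(__ARM_NR_cacheflush, start, end, 0);
}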
--- kernel-power-2.6.28.orig/arch/arm/mach-omap2/smartreflex.c
+++ kernel-power-2.6.28/arch/arm/mach-omap2/smartreflex.c
@@ -890,7 +890,7 @@
                return SR_FAIL;
        }
 
-       if (sr->is_autocomp_active) {
+       if (sr->is_autocomp_active && !sr->is_sr_reset) {
                WARN(1, "SR: Must not transmit VCBYPASS command while SR is "
                     "active");
                return SR_FAIL;
--- kernel-power-2.6.28.orig/arch/arm/mm/fault.c
+++ kernel-power-2.6.28/arch/arm/mm/fault.c
@@ -387,6 +387,9 @@
        if (addr < TASK_SIZE)
                return do_page_fault(addr, fsr, regs);
 
+       if (user_mode(regs))
+               goto bad_area;
+
        index = pgd_index(addr);
 
        /*
@@ -449,7 +452,12 @@
        { do_bad,               SIGILL,  BUS_ADRALN,    "alignment exception"              },
        { do_bad,               SIGKILL, 0,             "terminal exception"               },
        { do_bad,               SIGILL,  BUS_ADRALN,    "alignment exception"              },
+/* Do we need runtime check ? */
+#if __LINUX_ARM_ARCH__ < 6
        { do_bad,               SIGBUS,  0,             "external abort on linefetch"      },
+#else
+       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "I-cache maintenance fault"        },
+#endif
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "section translation fault"        },
        { do_bad,               SIGBUS,  0,             "external abort on linefetch"      },
        { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "page translation fault"           },
--- kernel-power-2.6.28.orig/arch/arm/mm/mmu.c
+++ kernel-power-2.6.28/arch/arm/mm/mmu.c
@@ -953,4 +953,6 @@
                pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
                flush_pmd_entry(pmd);
        }
+
+       local_flush_tlb_all();
 }
--- kernel-power-2.6.28.orig/arch/arm/mm/proc-v6.S
+++ kernel-power-2.6.28/arch/arm/mm/proc-v6.S
@@ -56,8 +56,6 @@
  *     to what would be the reset vector.
  *
  *     - loc   - location to jump to for soft reset
- *
- *     It is assumed that:
  */
        .align  5
 ENTRY(cpu_v6_reset)
--- kernel-power-2.6.28.orig/arch/arm/mm/proc-v7.S
+++ kernel-power-2.6.28/arch/arm/mm/proc-v7.S
@@ -28,7 +28,14 @@
 ENDPROC(cpu_v7_proc_init)
 
 ENTRY(cpu_v7_proc_fin)
-       mov     pc, lr
+       stmfd   sp!, {lr}
+       cpsid   if                              @ disable interrupts
+       bl      v7_flush_kern_cache_all
+       mrc     p15, 0, r0, c1, c0, 0           @ ctrl register
+       bic     r0, r0, #0x1000                 @ ...i............
+       bic     r0, r0, #0x0006                 @ .............ca.
+       mcr     p15, 0, r0, c1, c0, 0           @ disable caches
+       ldmfd   sp!, {pc}
 ENDPROC(cpu_v7_proc_fin)
 
 /*
@@ -39,8 +46,6 @@
  *     to what would be the reset vector.
  *
  *     - loc   - location to jump to for soft reset
- *
- *     It is assumed that:
  */
        .align  5
 ENTRY(cpu_v7_reset)
--- kernel-power-2.6.28.orig/block/cfq-iosched.c
+++ kernel-power-2.6.28/block/cfq-iosched.c
@@ -84,6 +84,11 @@
         */
        struct cfq_rb_root service_tree;
        unsigned int busy_queues;
+       /*
+        * Used to track any pending rt requests so we can pre-empt current
+        * non-RT cfqq in service when this value is non-zero.
+        */
+       unsigned int busy_rt_queues;
 
        int rq_in_driver;
        int sync_flight;
@@ -155,6 +160,7 @@
 
        unsigned long slice_end;
        long slice_resid;
+       unsigned int slice_dispatch;
 
        /* pending metadata requests */
        int meta_pending;
@@ -171,13 +177,12 @@
 enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
        CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
+       CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
        CFQ_CFQQ_FLAG_must_alloc,       /* must be allowed rq alloc */
        CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
-       CFQ_CFQQ_FLAG_must_dispatch,    /* must dispatch, even if expired */
        CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
        CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
        CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
-       CFQ_CFQQ_FLAG_queue_new,        /* queue never been serviced */
        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
 };
@@ -198,13 +203,12 @@
 
 CFQ_CFQQ_FNS(on_rr);
 CFQ_CFQQ_FNS(wait_request);
+CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(must_alloc);
 CFQ_CFQQ_FNS(must_alloc_slice);
-CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(fifo_expire);
 CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
-CFQ_CFQQ_FNS(queue_new);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 #undef CFQ_CFQQ_FNS
@@ -562,6 +566,8 @@
        BUG_ON(cfq_cfqq_on_rr(cfqq));
        cfq_mark_cfqq_on_rr(cfqq);
        cfqd->busy_queues++;
+       if (cfq_class_rt(cfqq))
+               cfqd->busy_rt_queues++;
 
        cfq_resort_rr_list(cfqd, cfqq);
 }
@@ -581,6 +587,8 @@
 
        BUG_ON(!cfqd->busy_queues);
        cfqd->busy_queues--;
+       if (cfq_class_rt(cfqq))
+               cfqd->busy_rt_queues--;
 }
 
 /*
@@ -765,10 +773,15 @@
        if (cfqq) {
                cfq_log_cfqq(cfqd, cfqq, "set_active");
                cfqq->slice_end = 0;
+               cfqq->slice_dispatch = 0;
+
+               cfq_clear_cfqq_wait_request(cfqq);
+               cfq_clear_cfqq_must_dispatch(cfqq);
                cfq_clear_cfqq_must_alloc_slice(cfqq);
                cfq_clear_cfqq_fifo_expire(cfqq);
                cfq_mark_cfqq_slice_new(cfqq);
-               cfq_clear_cfqq_queue_new(cfqq);
+
+               del_timer(&cfqd->idle_slice_timer);
        }
 
        cfqd->active_queue = cfqq;
@@ -786,7 +799,6 @@
        if (cfq_cfqq_wait_request(cfqq))
                del_timer(&cfqd->idle_slice_timer);
 
-       cfq_clear_cfqq_must_dispatch(cfqq);
        cfq_clear_cfqq_wait_request(cfqq);
 
        /*
@@ -915,7 +927,6 @@
            (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
                return;
 
-       cfq_mark_cfqq_must_dispatch(cfqq);
        cfq_mark_cfqq_wait_request(cfqq);
 
        /*
@@ -1001,10 +1012,24 @@
        /*
         * The active queue has run out of time, expire it and select new.
         */
-       if (cfq_slice_used(cfqq))
+       if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
                goto expire;
 
        /*
+        * If we have a RT cfqq waiting, then we pre-empt the current non-rt
+        * cfqq.
+        */
+       if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
+               /*
+                * We simulate this as cfqq timed out so that it gets to bank
+                * the remaining of its time slice.
+                */
+               cfq_log_cfqq(cfqd, cfqq, "preempt");
+               cfq_slice_expired(cfqd, 1);
+               goto new_queue;
+       }
+
+       /*
         * The active queue has requests and isn't expired, allow it to
         * dispatch.
         */
@@ -1030,59 +1055,6 @@
        return cfqq;
 }
 
-/*
- * Dispatch some requests from cfqq, moving them to the request queue
- * dispatch list.
- */
-static int
-__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                       int max_dispatch)
-{
-       int dispatched = 0;
-
-       BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
-       do {
-               struct request *rq;
-
-               /*
-                * follow expired path, else get first next available
-                */
-               rq = cfq_check_fifo(cfqq);
-               if (rq == NULL)
-                       rq = cfqq->next_rq;
-
-               /*
-                * finally, insert request into driver dispatch list
-                */
-               cfq_dispatch_insert(cfqd->queue, rq);
-
-               dispatched++;
-
-               if (!cfqd->active_cic) {
-                       atomic_inc(&RQ_CIC(rq)->ioc->refcount);
-                       cfqd->active_cic = RQ_CIC(rq);
-               }
-
-               if (RB_EMPTY_ROOT(&cfqq->sort_list))
-                       break;
-
-       } while (dispatched < max_dispatch);
-
-       /*
-        * expire an async queue immediately if it has used up its slice. idle
-        * queue always expire after 1 dispatch round.
-        */
-       if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
-           dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-           cfq_class_idle(cfqq))) {
-               cfqq->slice_end = jiffies + 1;
-               cfq_slice_expired(cfqd, 0);
-       }
-
-       return dispatched;
-}
-
 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
 {
        int dispatched = 0;
@@ -1116,11 +1088,45 @@
        return dispatched;
 }
 
+/*
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       struct request *rq;
+
+       BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+       /*
+        * follow expired path, else get first next available
+        */
+       rq = cfq_check_fifo(cfqq);
+       if (!rq)
+               rq = cfqq->next_rq;
+
+       /*
+        * insert request into driver dispatch list
+        */
+       cfq_dispatch_insert(cfqd->queue, rq);
+
+       if (!cfqd->active_cic) {
+               struct cfq_io_context *cic = RQ_CIC(rq);
+
+               atomic_inc(&cic->ioc->refcount);
+               cfqd->active_cic = cic;
+       }
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
 static int cfq_dispatch_requests(struct request_queue *q, int force)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq;
-       int dispatched;
+       unsigned int max_dispatch;
 
        if (!cfqd->busy_queues)
                return 0;
@@ -1128,33 +1134,63 @@
        if (unlikely(force))
                return cfq_forced_dispatch(cfqd);
 
-       dispatched = 0;
-       while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
-               int max_dispatch;
+       cfqq = cfq_select_queue(cfqd);
+       if (!cfqq)
+               return 0;
+
+       /*
+        * If this is an async queue and we have sync IO in flight, let it wait
+        */
+       if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+               return 0;
+
+       max_dispatch = cfqd->cfq_quantum;
+       if (cfq_class_idle(cfqq))
+               max_dispatch = 1;
 
-               max_dispatch = cfqd->cfq_quantum;
+       /*
+        * Does this cfqq already have too much IO in flight?
+        */
+       if (cfqq->dispatched >= max_dispatch) {
+               /*
+                * idle queue must always only have a single IO in flight
+                */
                if (cfq_class_idle(cfqq))
-                       max_dispatch = 1;
+                       return 0;
 
-               if (cfqq->dispatched >= max_dispatch) {
-                       if (cfqd->busy_queues > 1)
-                               break;
-                       if (cfqq->dispatched >= 4 * max_dispatch)
-                               break;
-               }
+               /*
+                * We have other queues, don't allow more IO from this one
+                */
+               if (cfqd->busy_queues > 1)
+                       return 0;
 
-               if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-                       break;
+               /*
+                * we are the only queue, allow up to 4 times of 'quantum'
+                */
+               if (cfqq->dispatched >= 4 * max_dispatch)
+                       return 0;
+       }
 
-               cfq_clear_cfqq_must_dispatch(cfqq);
-               cfq_clear_cfqq_wait_request(cfqq);
-               del_timer(&cfqd->idle_slice_timer);
+       /*
+        * Dispatch a request from this cfqq
+        */
+       cfq_dispatch_request(cfqd, cfqq);
+       cfqq->slice_dispatch++;
+       cfq_clear_cfqq_must_dispatch(cfqq);
 
-               dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+       /*
+        * expire an async queue immediately if it has used up its slice. idle
+        * queue always expire after 1 dispatch round.
+        */
+       if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
+           cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+           cfq_class_idle(cfqq))) {
+               cfqq->slice_end = jiffies + 1;
+               cfq_slice_expired(cfqd, 0);
        }
 
-       cfq_log(cfqd, "dispatched=%d", dispatched);
-       return dispatched;
+       cfq_log(cfqd, "dispatched a request");
+       return 1;
 }
 
 /*
@@ -1318,7 +1354,15 @@
                unsigned long flags;
 
                spin_lock_irqsave(q->queue_lock, flags);
-               __cfq_exit_single_io_context(cfqd, cic);
+
+               /*
+                * Ensure we get a fresh copy of the ->key to prevent
+                * race between exiting task and queue
+                */
+               smp_read_barrier_depends();
+               if (cic->key)
+                       __cfq_exit_single_io_context(cfqd, cic);
+
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
 }
@@ -1472,7 +1516,6 @@
                cfqq->cfqd = cfqd;
 
                cfq_mark_cfqq_prio_changed(cfqq);
-               cfq_mark_cfqq_queue_new(cfqq);
 
                cfq_init_prio_data(cfqq, ioc);
 
@@ -1797,6 +1840,12 @@
        if (rq_is_meta(rq) && !cfqq->meta_pending)
                return 1;
 
+       /*
+        * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
+        */
+       if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
+               return 1;
+
        if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
                return 0;
 
@@ -1853,23 +1902,28 @@
 
        if (cfqq == cfqd->active_queue) {
                /*
-                * if we are waiting for a request for this queue, let it rip
-                * immediately and flag that we must not expire this queue
-                * just now
+                * Remember that we saw a request from this process, but
+                * don't start queuing just yet. Otherwise we risk seeing lots
+                * of tiny requests, because we disrupt the normal plugging
+                * and merging. If the request is already larger than a single
+                * page, let it rip immediately. For that case we assume that
+                * merging is already done.
                 */
                if (cfq_cfqq_wait_request(cfqq)) {
+                       if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE) {
+                               del_timer(&cfqd->idle_slice_timer);
+                               blk_start_queueing(cfqd->queue);
+                       }
                        cfq_mark_cfqq_must_dispatch(cfqq);
-                       del_timer(&cfqd->idle_slice_timer);
-                       blk_start_queueing(cfqd->queue);
                }
        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
                /*
                 * not the active queue - expire current slice if it is
                 * idle and has expired it's mean thinktime or this new queue
-                * has some old slice time left and is of higher priority
+                * has some old slice time left and is of higher priority or
+                * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               cfq_mark_cfqq_must_dispatch(cfqq);
                blk_start_queueing(cfqd->queue);
        }
 }
@@ -2129,6 +2183,12 @@
                timed_out = 0;
 
                /*
+                * We saw a request before the queue expired, let it through
+                */
+               if (cfq_cfqq_must_dispatch(cfqq))
+                       goto out_kick;
+
+               /*
                 * expired
                 */
                if (cfq_slice_used(cfqq))
@@ -2144,10 +2204,8 @@
                /*
                 * not expired and it has a request pending, let it dispatch
                 */
-               if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
-                       cfq_mark_cfqq_must_dispatch(cfqq);
+               if (!RB_EMPTY_ROOT(&cfqq->sort_list))
                        goto out_kick;
-               }
        }
 expire:
        cfq_slice_expired(cfqd, timed_out);
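
The cfq-iosched hunks track pending real-time queues in busy_rt_queues so an RT cfqq can preempt an active best-effort one, and rework dispatch to move a single request per round. To exercise the RT path, a task must be in the real-time I/O scheduling class; a minimal sketch using the raw ioprio_set syscall (what `ionice -c1` does), with the constants copied from include/linux/ioprio.h:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define IOPRIO_WHO_PROCESS      1
#define IOPRIO_CLASS_RT         1
#define IOPRIO_CLASS_SHIFT      13
#define IOPRIO_PRIO_VALUE(cls, data)    (((cls) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
        /* pid 0 means "this process"; RT class at its highest level (0) */
        if (syscall(__NR_ioprio_set, IOPRIO_WHO_PROCESS, 0,
                    IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0)) < 0) {
                perror("ioprio_set");
                return 1;
        }
        /* I/O issued from here on is queued on an RT cfqq and, with the
         * patch above, preempts a running best-effort queue. */
        return 0;
}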
--- kernel-power-2.6.28.orig/drivers/dsp/bridge/rmgr/drv.c
+++ kernel-power-2.6.28/drivers/dsp/bridge/rmgr/drv.c
@@ -517,11 +517,12 @@
                pDMMRes = pDMMList;
                pDMMList = pDMMList->next;
                if (pDMMRes->dmmAllocated) {
-                       status = PROC_UnMap(pDMMRes->hProcessor,
-                                (void *)pDMMRes->ulDSPResAddr, pCtxt);
-                       status = PROC_UnReserveMemory(pDMMRes->hProcessor,
-                                (void *)pDMMRes->ulDSPResAddr);
-                       pDMMRes->dmmAllocated = 0;
+                       /* PROC_UnMap frees pDMMRes */
+                       void *processor = pDMMRes->hProcessor;
+                       void *map_addr = (void*)pDMMRes->ulDSPAddr;
+                       void *rsv_addr = (void*)pDMMRes->ulDSPResAddr;
+                       status = PROC_UnMap(processor, map_addr, pCtxt);
+                       status = PROC_UnReserveMemory(processor, rsv_addr);
                }
        }
        return status;
--- kernel-power-2.6.28.orig/drivers/dsp/bridge/rmgr/proc.c
+++ kernel-power-2.6.28/drivers/dsp/bridge/rmgr/proc.c
@@ -750,6 +750,7 @@
                        break;
 
                start = vma->vm_end;
+               len -= size;
        }
 
        if (!vma)
--- kernel-power-2.6.28.orig/drivers/i2c/chips/lis302dl.c
+++ kernel-power-2.6.28/drivers/i2c/chips/lis302dl.c
@@ -44,6 +44,7 @@
 #      define LIS302_CTRL1_Y           (1 << 1)
 #      define LIS302_CTRL1_X           (1 << 0)
 #define LIS302_CTRL_2                  0x21
+#      define LIS302_CTRL2_BOOT        (1 << 6)
 #define LIS302_CTRL_3                  0x22
 #      define  LIS302_CTRL3_GND        0x00
 #      define  LIS302_CTRL3_FF_WU_1    0x01
@@ -161,8 +162,13 @@
        if (ret < 0)
                goto out;
 
-       /* REG 2 */
-       /* Control High Pass filter selection. not used */
+       /* REG 2
+        * Boot is used to refresh internal registers
+        * Control High Pass filter selection. not used
+        */
+       ret = lis302dl_write(c, LIS302_CTRL_2, LIS302_CTRL2_BOOT);
+       if (ret < 0)
+               goto out;
 
        /* REG 3
         * Interrupt CTRL register. One interrupt pin is used for
--- kernel-power-2.6.28.orig/drivers/leds/leds-lp5523.c
+++ kernel-power-2.6.28/drivers/leds/leds-lp5523.c
@@ -32,6 +32,7 @@
 #include <linux/wait.h>
 #include <linux/leds.h>
 #include <linux/leds-lp5523.h>
+#include <linux/workqueue.h>
 
 #define LP5523_DRIVER_NAME             "lp5523"
 #define LP5523_REG_ENABLE              0x00
@@ -120,6 +121,8 @@
        u8                      led_nr;
        u8                      led_current;
        struct led_classdev     cdev;
+       struct work_struct brightness_work;
+       u8                      brightness;
 };
 
 struct lp5523_chip {
@@ -161,6 +164,8 @@
 static void lp5523_work(struct work_struct  *work);
 static irqreturn_t lp5523_irq(int irq, void *_chip);
 
+static void lp5523_led_brightness_work(struct work_struct *work);
+
 
 static int lp5523_write(struct i2c_client *client, u8 reg, u8 value)
 {
@@ -476,6 +481,16 @@
                             enum led_brightness brightness)
 {
        struct lp5523_led *led = cdev_to_led(cdev);
+       led->brightness = (u8)brightness;
+
+       schedule_work(&led->brightness_work);
+}
+
+static void lp5523_led_brightness_work(struct work_struct *work)
+{
+       struct lp5523_led *led = container_of(work,
+                                             struct lp5523_led,
+                                             brightness_work);
        struct lp5523_chip *chip = led_to_lp5523(led);
        struct i2c_client *client = chip->client;
 
@@ -483,7 +498,7 @@
 
        lp5523_write(client,
                     LP5523_REG_LED_PWM_BASE + led->led_nr,
-                    (u8)brightness);
+                    led->brightness);
 
        mutex_unlock(&chip->lock);
 }
@@ -907,6 +922,8 @@
                        dev_err(&client->dev, "error initializing leds\n");
                        goto fail2;
                }
+               INIT_WORK(&(chip->leds[i].brightness_work),
+                         lp5523_led_brightness_work);
        }
 
        ret = lp5523_register_sysfs(client);
@@ -916,8 +933,10 @@
        }
        return ret;
 fail2:
-       for (i = 0; i < pdata->num_leds; i++)
+       for (i = 0; i < pdata->num_leds; i++) {
                led_classdev_unregister(&chip->leds[i].cdev);
+               cancel_work_sync(&chip->leds[i].brightness_work);
+               }
 
 fail1:
        kfree(chip);
@@ -931,8 +950,10 @@
 
        lp5523_unregister_sysfs(client);
 
-       for (i = 0; i < chip->num_leds; i++)
+       for (i = 0; i < chip->num_leds; i++) {
                led_classdev_unregister(&chip->leds[i].cdev);
+               cancel_work_sync(&chip->leds[i].brightness_work);
+               }
 
        kfree(chip);
 
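The lp5523 hunks are the usual deferral pattern: the led_classdev brightness_set callback may be invoked from atomic context, but I2C transfers sleep, so the value is cached and the register write moves to process context via a workqueue. A skeleton of the pattern, with illustrative names rather than the driver's own:

#include <linux/leds.h>
#include <linux/workqueue.h>

struct my_led {
        struct led_classdev     cdev;
        struct work_struct      work;
        u8                      brightness;
};

static void my_set_brightness(struct led_classdev *cdev,
                              enum led_brightness value)
{
        struct my_led *led = container_of(cdev, struct my_led, cdev);

        led->brightness = value;        /* cache only; no bus access here */
        schedule_work(&led->work);      /* the work handler does the I2C */
}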
--- kernel-power-2.6.28.orig/drivers/media/radio/radio-si4713.c
+++ kernel-power-2.6.28/drivers/media/radio/radio-si4713.c
@@ -54,6 +54,25 @@
 /* module parameters */
 static int radio_nr = -1;      /* radio device minor (-1 ==> auto assign) */
 
+/* properties lock for write operations */
+static int config_locked;
+
+/* saved power levels */
+static unsigned int max_pl;
+static unsigned int min_pl;
+
+/* structure for pid registration */
+struct pid_list {
+       pid_t pid;
+       struct list_head plist;
+};
+
+#define APP_MAX_NUM    2
+
+static int pid_count;
+static LIST_HEAD(pid_list_head);
+static struct si4713_device *si4713_dev;
+
 /*
  * Sysfs properties
  * Read and write functions
@@ -167,6 +186,37 @@
                                        si4713_##prop##_write);
 
 /*
+ * Config lock property
+ */
+static ssize_t si4713_lock_write(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf,
+                               size_t count)
+{
+       int l;
+
+       if (config_locked)
+               return -EPERM;
+
+       sscanf(buf, "%d", &l);
+
+       if (l != 0)
+               config_locked = 1;
+
+       return count;
+}
+
+static ssize_t si4713_lock_read(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       return sprintf(buf, "%d\n", config_locked);
+}
+
+static DEVICE_ATTR(lock, S_IRUGO | S_IWUSR, si4713_lock_read,
+                       si4713_lock_write);
+
+/*
  * Power level property
  */
 /* power_level (rw) 88 - 115 or 0 */
@@ -179,6 +229,9 @@
        unsigned int p;
        int rval, pl;
 
+       if (config_locked)
+               return -EPERM;
+
        if (!sdev) {
                rval = -ENODEV;
                goto exit;
@@ -320,6 +373,7 @@
                        value > MAX_TONE_OFF_TIME)
 
 static struct attribute *attrs[] = {
+       &dev_attr_lock.attr,
        &dev_attr_power_level.attr,
        &dev_attr_antenna_capacitor.attr,
        &dev_attr_rds_pi.attr,
@@ -366,13 +420,118 @@
        return IRQ_HANDLED;
 }
 
+static int register_pid(pid_t pid)
+{
+       struct pid_list *pitem;
+
+       list_for_each_entry(pitem, &pid_list_head, plist) {
+               if (pitem->pid == pid)
+                       return -EINVAL;
+       }
+
+       pitem = kmalloc(sizeof(struct pid_list), GFP_KERNEL);
+
+       if (!pitem)
+               return -ENOMEM;
+
+       pitem->pid = pid;
+
+       list_add(&(pitem->plist), &pid_list_head);
+       pid_count++;
+
+       return 0;
+}
+
+static int unregister_pid(pid_t pid)
+{
+       struct pid_list *pitem, *n;
+
+       list_for_each_entry_safe(pitem, n, &pid_list_head, plist) {
+               if (pitem->pid == pid) {
+                       list_del(&(pitem->plist));
+                       pid_count--;
+
+                       kfree(pitem);
+
+                       return 0;
+               }
+       }
+       return -EINVAL;
+}
+
+static int si4713_priv_ioctl(struct inode *inode, struct file *file,
+               unsigned int cmd, unsigned long arg)
+{
+       unsigned int pow;
+       int pl, rval;
+
+       if (cmd != LOCK_LOW_POWER && cmd != RELEASE_LOW_POWER)
+               return video_ioctl2(inode, file, cmd, arg);
+
+       pl = si4713_get_power_level(si4713_dev);
+
+       if (pl < 0) {
+               rval = pl;
+               goto exit;
+       }
+
+       if (copy_from_user(&pow, (void __user *)arg, sizeof(pow))) {
+               rval = -EFAULT;
+               goto exit;
+       }
+
+       if (cmd == LOCK_LOW_POWER) {
+
+               if (pid_count == APP_MAX_NUM) {
+                       rval = -EPERM;
+                       goto exit;
+               }
+
+               if (pid_count == 0) {
+                       if (pow > pl) {
+                               rval = -EINVAL;
+                               goto exit;
+                       } else {
+                               /* Set max possible power level */
+                               max_pl = pl;
+                               min_pl = pow;
+                       }
+               }
+
+               rval = register_pid(current->pid);
+
+               if (rval)
+                       goto exit;
+
+               /* Lower min power level if asked */
+               if (pow < min_pl)
+                       min_pl = pow;
+               else
+                       pow = min_pl;
+
+       } else { /* RELEASE_LOW_POWER */
+               rval = unregister_pid(current->pid);
+
+               if (rval)
+                       goto exit;
+
+               if (pid_count == 0) {
+                       if (pow > max_pl)
+                               pow = max_pl;
+               }
+       }
+       rval = si4713_set_power_level(si4713_dev, pow);
+exit:
+       return rval;
+}
+
 /*
  * si4713_fops - file operations interface
  */
 static const struct file_operations si4713_fops = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
-       .ioctl          = video_ioctl2,
+       .ioctl          = si4713_priv_ioctl,
        .compat_ioctl   = v4l_compat_ioctl32,
 };
 
@@ -747,6 +906,9 @@
                goto free_sysfs;
        }
 
+       /* save to global pointer for it to be accessible from ioctl() call */
+       si4713_dev = sdev;
+
        return 0;
 
 free_sysfs:
--- kernel-power-2.6.28.orig/drivers/media/radio/radio-si4713.h
+++ kernel-power-2.6.28/drivers/media/radio/radio-si4713.h
@@ -21,6 +21,9 @@
 #define SI4713_I2C_ADDR_BUSEN_HIGH     0x63
 #define SI4713_I2C_ADDR_BUSEN_LOW      0x11
 
+#define LOCK_LOW_POWER         _IOW('v', BASE_VIDIOC_PRIVATE + 0, unsigned int)
+#define RELEASE_LOW_POWER      _IOW('v', BASE_VIDIOC_PRIVATE + 1, unsigned int)
+
 /*
  * Platform dependent definition
 */
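
Together, the two radio-si4713 files add a pair of private V4L ioctls that let up to APP_MAX_NUM processes cap the FM transmit power and restore it on release. A hedged sketch of a userspace caller; the /dev/radio0 node name is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>    /* BASE_VIDIOC_PRIVATE */

#define LOCK_LOW_POWER    _IOW('v', BASE_VIDIOC_PRIVATE + 0, unsigned int)
#define RELEASE_LOW_POWER _IOW('v', BASE_VIDIOC_PRIVATE + 1, unsigned int)

int main(void)
{
        unsigned int level = 88;        /* requested low power level */
        int fd = open("/dev/radio0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, LOCK_LOW_POWER, &level) < 0)
                perror("LOCK_LOW_POWER");
        /* ... run with the capped power level ... */
        if (ioctl(fd, RELEASE_LOW_POWER, &level) < 0)
                perror("RELEASE_LOW_POWER");
        close(fd);
        return 0;
}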
--- kernel-power-2.6.28.orig/drivers/media/video/omap34xxcam.c
+++ kernel-power-2.6.28/drivers/media/video/omap34xxcam.c
@@ -1833,6 +1833,7 @@
        struct omap34xxcam_videodev *vdev = fh->vdev;
        struct device *isp = vdev->cam->isp;
        int i;
+       int streamoff = 0;
 
        if (omap34xxcam_daemon_release(vdev, file))
                goto daemon_out;
@@ -1844,6 +1845,7 @@
                omap34xxcam_slave_power_set(vdev, V4L2_POWER_STANDBY,
                                            OMAP34XXCAM_SLAVE_POWER_ALL);
                vdev->streaming = NULL;
+               streamoff = 1;
        }
 
        if (atomic_dec_return(&vdev->users) == 0) {
@@ -1853,6 +1855,10 @@
        }
        mutex_unlock(&vdev->mutex);
 
+       if (streamoff)
+               omap34xxcam_daemon_req_hw_reconfig(
+                       vdev, OMAP34XXCAM_DAEMON_HW_RECONFIG_STREAMOFF);
+
 daemon_out:
        file->private_data = NULL;
 
--- kernel-power-2.6.28.orig/drivers/mmc/host/omap_hsmmc.c
+++ kernel-power-2.6.28/drivers/mmc/host/omap_hsmmc.c
@@ -115,6 +115,7 @@
 /* Timeouts for entering power saving states on inactivity, msec */
 #define OMAP_MMC_DISABLED_TIMEOUT      100
 #define OMAP_MMC_SLEEP_TIMEOUT         1000
+#define OMAP_MMC_OFF_NOSLP_TIMEOUT     3000
 #define OMAP_MMC_OFF_TIMEOUT           8000
 
 /*
@@ -1249,21 +1250,21 @@
 
 /*
  * Dynamic power saving handling, FSM:
- *   ENABLED -> DISABLED -> CARDSLEEP / REGSLEEP -> OFF
- *     ^___________|          |                      |
- *     |______________________|______________________|
+ *   ENABLED -> DISABLED -> EXTDISABLED / CARDSLEEP / REGSLEEP -> OFF
+ *     ^___________|                        |                      |
+ *     |____________________________________|______________________|
  *
- * ENABLED:   mmc host is fully functional
- * DISABLED:  fclk is off
- * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep
- * REGSLEEP:  fclk is off, voltage regulator is asleep
- * OFF:       fclk is off, voltage regulator is off
+ * ENABLED:       mmc host is fully functional
+ * (EXT)DISABLED: fclk is off
+ * CARDSLEEP:     fclk is off, card is asleep, voltage regulator is asleep
+ * REGSLEEP:      fclk is off, voltage regulator is asleep
+ * OFF:           fclk is off, voltage regulator is off
 *
 * Transition handlers return the timeout for the next state transition
 * or negative error.
 */
 
-enum {ENABLED = 0, DISABLED, CARDSLEEP, REGSLEEP, OFF};
+enum {ENABLED = 0, DISABLED, EXTDISABLED, CARDSLEEP, REGSLEEP, OFF};
 
 /* Handler for [ENABLED -> DISABLED] transition */
 static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host)
@@ -1300,7 +1301,21 @@
        return 1;
 }
 
-/* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */
+/* Big SD cards (16GiB) are prohibited from
+   switching voltage regulator to asleep
+   because of high current consumption */
+static int omap_hsmmc_support_sleep(struct mmc_host *mmc)
+{
+       if (!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
+           ((u64)mmc->card->csd.capacity << mmc->card->csd.read_blkbits) >
+           14ULL * 1024 * 1024 * 1024) {
+               return 0;
+       }
+
+       return 1;
+}
+
+/* Handler for [DISABLED -> EXTDISABLED / REGSLEEP / CARDSLEEP] transition */
 static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host)
 {
        int err, new_state, sleep;
@@ -1319,12 +1334,12 @@
                }
                new_state = CARDSLEEP;
        } else {
-               new_state = REGSLEEP;
+               new_state = omap_hsmmc_support_sleep(host->mmc) ? REGSLEEP : EXTDISABLED;
        }
 
        sleep = omap_hsmmc_full_sleep(host->mmc->card) &&
                (new_state == CARDSLEEP);
-       if (mmc_slot(host).set_sleep)
+       if (mmc_slot(host).set_sleep && new_state != EXTDISABLED)
                mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0,
                                        sleep);
        /* FIXME: turn off bus power and perhaps interrupts too */
@@ -1334,18 +1349,20 @@
        mmc_release_host(host->mmc);
 
        dev_dbg(mmc_dev(host->mmc), "DISABLED -> %s\n",
-               host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
+               host->dpm_state == CARDSLEEP ? "CARDSLEEP" :
+               host->dpm_state == REGSLEEP ?  "REGSLEEP" : "EXTDISABLED");
 
        if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
            mmc_slot(host).card_detect ||
            (mmc_slot(host).get_cover_state &&
             mmc_slot(host).get_cover_state(host->dev, host->slot_id)))
-               return msecs_to_jiffies(OMAP_MMC_OFF_TIMEOUT);
+               return msecs_to_jiffies(new_state == EXTDISABLED ?
+                      OMAP_MMC_OFF_NOSLP_TIMEOUT : OMAP_MMC_OFF_TIMEOUT);
 
        return 0;
 }
 
-/* Handler for [REGSLEEP / CARDSLEEP -> OFF] transition */
+/* Handler for [EXTDISABLED / REGSLEEP / CARDSLEEP -> OFF] transition */
 static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host)
 {
        if (!mmc_try_claim_host(host->mmc))
@@ -1364,7 +1381,8 @@
        host->power_mode = MMC_POWER_OFF;
 
        dev_dbg(mmc_dev(host->mmc), "%s -> OFF\n",
-               host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
+               host->dpm_state == CARDSLEEP ? "CARDSLEEP" :
+               host->dpm_state == REGSLEEP ?  "REGSLEEP" : "EXTDISABLED");
 
        host->dpm_state = OFF;
 
@@ -1405,14 +1423,15 @@
        omap_hsmmc_context_restore(host);
        asleep = omap_hsmmc_full_sleep(host->mmc->card) &&
                (host->dpm_state == CARDSLEEP);
-       if (mmc_slot(host).set_sleep)
+       if (mmc_slot(host).set_sleep && host->dpm_state != EXTDISABLED)
                mmc_slot(host).set_sleep(host->dev, host->slot_id, 0,
                                        host->vdd, asleep);
        if (mmc_card_can_sleep(host->mmc))
                mmc_card_awake(host->mmc);
 
        dev_dbg(mmc_dev(host->mmc), "%s -> ENABLED\n",
-               host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
+               host->dpm_state == CARDSLEEP ? "CARDSLEEP" :
+               host->dpm_state == REGSLEEP ?  "REGSLEEP" : "EXTDISABLED");
 
        if (host->pdata->set_pm_constraints)
                host->pdata->set_pm_constraints(host->dev, 1);
@@ -1454,6 +1473,7 @@
        switch (host->dpm_state) {
        case DISABLED:
                return omap_hsmmc_disabled_to_enabled(host);
+       case EXTDISABLED:
        case CARDSLEEP:
        case REGSLEEP:
                return omap_hsmmc_sleep_to_enabled(host);
@@ -1484,6 +1504,7 @@
        }
        case DISABLED:
                return omap_hsmmc_disabled_to_sleep(host);
+       case EXTDISABLED:
        case CARDSLEEP:
        case REGSLEEP:
                return omap_hsmmc_sleep_to_off(host);
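
The 14 GiB cut-off in omap_hsmmc_support_sleep() comes straight from the CSD: csd.capacity counts blocks of 2^read_blkbits bytes, so the card size is capacity << read_blkbits after widening to 64 bits. The test in isolation, for reference (the function name here is illustrative):

/* Non-zero for cards above the ~14 GiB threshold used above;
 * a 16 GiB card trips it, an 8 GiB card does not. */
static int card_too_big_for_regsleep(unsigned int capacity,
                                     unsigned int read_blkbits)
{
        return ((unsigned long long)capacity << read_blkbits) >
               14ULL * 1024 * 1024 * 1024;
}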
--- kernel-power-2.6.28.orig/drivers/net/wireless/wl12xx/wl1251_acx.c
+++ kernel-power-2.6.28/drivers/net/wireless/wl12xx/wl1251_acx.c
@@ -910,7 +910,7 @@
        }
 
        *mactime = tsf_info->current_tsf_lsb |
-               (tsf_info->current_tsf_msb << 31);
+               ((unsigned long long) tsf_info->current_tsf_msb << 32);
 
 out:
        kfree(tsf_info);
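
The wl1251 one-liner fixes two bugs at once: the shift count (31 instead of 32) and the missing widening cast. Shifting a 32-bit value by 32 is undefined behaviour in C, so the MSB half must be promoted to 64 bits before the shift; in isolation:

#include <stdint.h>

static uint64_t make_tsf(uint32_t msb, uint32_t lsb)
{
        /* (msb << 32) on a 32-bit type is undefined; widen first */
        return ((uint64_t)msb << 32) | lsb;
}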
--- kernel-power-2.6.28.orig/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ kernel-power-2.6.28/drivers/net/wireless/wl12xx/wl1251_cmd.c
@@ -242,7 +242,7 @@
        if (ret < 0) {
                wl1251_error("tx %s cmd for channel %d failed",
                             enable ? "start" : "stop", channel);
-               return ret;
+               goto out;
        }
 
        wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
--- kernel-power-2.6.28.orig/include/linux/sched.h
+++ kernel-power-2.6.28/include/linux/sched.h
@@ -1665,11 +1665,11 @@
 static inline void wake_up_idle_cpu(int cpu) { }
 #endif
 
+extern unsigned int sysctl_sched_child_runs_first;
 #ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
--- kernel-power-2.6.28.orig/include/linux/swap.h
+++ kernel-power-2.6.28/include/linux/swap.h
@@ -130,6 +130,17 @@
 #define SWAP_MAP_MAX   0x7fff
 #define SWAP_MAP_BAD   0x8000
 
+#define SWAP_GAP_TREE_SIZE 10
+#define SWAP_GAP_RESCAN_TIMEO_MSEC 2000
+#define swap_gap_len(gap) ((gap)->end - (gap)->next)
+#define swap_gap_rb_entry(node) rb_entry(node, struct swap_gap_node, rb_node)
+/* Struct to store gaps info */
+struct swap_gap_node {
+       struct rb_node rb_node;
+       unsigned int next;
+       unsigned int end;
+};
+
 /*
  * The in-memory structure used to track swap areas.
 */
@@ -157,6 +168,9 @@
        unsigned int gap_next;
        unsigned int gap_end;
        unsigned int gaps_exist;
+       struct rb_root gaps_tree;
+       struct swap_gap_node *gap_pool_arr;
+       unsigned long gap_last_scan;
        unsigned int lowest_bit;
        unsigned int highest_bit;
        unsigned int cluster_next;
--- kernel-power-2.6.28.orig/include/net/bluetooth/sco.h
+++ kernel-power-2.6.28/include/net/bluetooth/sco.h
@@ -29,7 +29,7 @@
 #define SCO_DEFAULT_MTU                500
 #define SCO_DEFAULT_FLUSH_TO   0xFFFF
 
-#define SCO_CONN_TIMEOUT       (HZ * 40)
+#define SCO_CONN_TIMEOUT       (HZ * 25)
 #define SCO_DISCONN_TIMEOUT    (HZ * 2)
 #define SCO_CONN_IDLE_TIMEOUT  (HZ * 60)
 
--- kernel-power-2.6.28.orig/kernel/sched_fair.c
+++ kernel-power-2.6.28/kernel/sched_fair.c
@@ -48,10 +48,10 @@
 static unsigned int sched_nr_latency = 5;
 
 /*
- * After fork, child runs first. (default) If set to 0 then
+ * After fork, child runs first. If set to 0 then
  * parent will (try to) run first.
 */
-const_debug unsigned int sysctl_sched_child_runs_first = 1;
+unsigned int sysctl_sched_child_runs_first __read_mostly;
 
 /*
  * sys_sched_yield() compat mode
--- kernel-power-2.6.28.orig/kernel/sysctl.c
+++ kernel-power-2.6.28/kernel/sysctl.c
@@ -235,6 +235,14 @@
 #endif
 
 static struct ctl_table kern_table[] = {
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "sched_child_runs_first",
+               .data           = &sysctl_sched_child_runs_first,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
 #ifdef CONFIG_SCHED_DEBUG
        {
                .ctl_name       = CTL_UNNUMBERED,
@@ -289,14 +297,6 @@
        },
        {
                .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "sched_child_runs_first",
-               .data           = &sysctl_sched_child_runs_first,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = CTL_UNNUMBERED,
                .procname       = "sched_features",
                .data           = &sysctl_sched_features,
                .maxlen         = sizeof(unsigned int),
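
Taken together, the sched.h, sched_fair.c, and sysctl.c hunks demote sched_child_runs_first from a debug-only knob (previously defaulting to 1) to a regular tunable that defaults to 0 and is exposed even without CONFIG_SCHED_DEBUG. From userspace it is a plain procfs write:

#include <stdio.h>

int main(void)
{
        /* Re-enable "child runs first" semantics after fork(). */
        FILE *f = fopen("/proc/sys/kernel/sched_child_runs_first", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fprintf(f, "1\n");
        return fclose(f) ? 1 : 0;
}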
--- kernel-power-2.6.28.orig/mm/swapfile.c
+++ kernel-power-2.6.28/mm/swapfile.c
@@ -996,11 +996,55 @@
        spin_unlock(&mmlist_lock);
 }
 
+void gaps_rbtree_insert(struct swap_info_struct *sis,
+                       struct swap_gap_node *node)
+{
+       struct rb_node **p = &sis->gaps_tree.rb_node;
+       struct rb_node *parent = NULL;
+       struct swap_gap_node *tmp;
+
+       while (*p) {
+               parent = *p;
+               tmp = rb_entry(parent, struct swap_gap_node, rb_node);
+               if (swap_gap_len(node) < swap_gap_len(tmp))
+                       p = &(*p)->rb_left;
+               else
+                       p = &(*p)->rb_right;
+       }
+       rb_link_node(&node->rb_node, parent, p);
+       rb_insert_color(&node->rb_node, &sis->gaps_tree);
+}
+
+void gaps_rbtree_add(struct swap_info_struct *sis,
+                               unsigned int next, unsigned int end,
+                               struct swap_gap_node **gap_min, int *pos)
+{
+       struct swap_gap_node *gap_node;
+       if (*pos < SWAP_GAP_TREE_SIZE) {
+               gap_node = &sis->gap_pool_arr[*pos];
+               *pos += 1;
+       } else if (swap_gap_len(*gap_min) > end - next) {
+               return;
+       } else {
+               gap_node = *gap_min;
+               rb_erase(&gap_node->rb_node, &sis->gaps_tree);
+               *gap_min = swap_gap_rb_entry(rb_first(&sis->gaps_tree));
+       }
+       gap_node->next = next;
+       gap_node->end = end;
+       if (gap_min && (*gap_min == NULL ||
+                       swap_gap_len(*gap_min) > swap_gap_len(gap_node)))
+               *gap_min = gap_node;
+       gaps_rbtree_insert(sis, gap_node);
+}
+
 /* Find the largest sequence of free pages */
 int find_gap(struct swap_info_struct *sis)
 {
        unsigned i, uninitialized_var(start), uninitialized_var(gap_next);
-       unsigned uninitialized_var(gap_end), gap_size = 0;
+       unsigned uninitialized_var(gap_end);
+       struct swap_gap_node *gap_max, *gap_min = NULL;
+       int pos = 0;
        int in_gap = 0;
 
        spin_unlock(&sis->remap_lock);
@@ -1017,6 +1061,11 @@
                mutex_unlock(&sis->remap_mutex);
                return -1;
        }
+       if (time_after(jiffies, sis->gap_last_scan +
+                       msecs_to_jiffies(SWAP_GAP_RESCAN_TIMEO_MSEC)))
+               sis->gaps_tree = RB_ROOT;
+       if (!RB_EMPTY_ROOT(&sis->gaps_tree))
+               goto out;
        spin_unlock(&sis->remap_lock);
 
        /*
@@ -1028,11 +1077,7 @@
                if (in_gap) {
                        if (!(sis->swap_remap[i] & 0x80000000))
                                continue;
-                       if (i - start > gap_size) {
-                               gap_next = start;
-                               gap_end = i - 1;
-                               gap_size = i - start;
-                       }
+                       gaps_rbtree_add(sis, start, i - 1, &gap_min, &pos);
                        in_gap = 0;
                } else {
                        if (sis->swap_remap[i] & 0x80000000)
@@ -1043,13 +1088,14 @@
                cond_resched();
        }
        spin_lock(&sis->remap_lock);
-       if (in_gap && i - start > gap_size) {
-               sis->gap_next = start;
-               sis->gap_end = i - 1;
-       } else {
-               sis->gap_next = gap_next;
-               sis->gap_end = gap_end;
-       }
+       if (in_gap)
+               gaps_rbtree_add(sis, start, i - 1, &gap_min, &pos);
+       sis->gap_last_scan = jiffies;
+out:
+       gap_max = swap_gap_rb_entry(rb_last(&sis->gaps_tree));
+       rb_erase(&gap_max->rb_node, &sis->gaps_tree);
+       sis->gap_next = gap_max->next;
+       sis->gap_end = gap_max->end;
        mutex_unlock(&sis->remap_mutex);
        return 0;
 }
@@ -1471,6 +1517,7 @@
        p->flags = 0;
        spin_unlock(&swap_lock);
        mutex_unlock(&swapon_mutex);
+       kfree(p->gap_pool_arr);
        vfree(p->swap_remap);
        vfree(swap_map);
        inode = mapping->host;
@@ -1825,6 +1872,14 @@
                goto bad_swap;
        }
 
+       p->gap_pool_arr = kmalloc(sizeof(struct swap_gap_node)*
+                               SWAP_GAP_TREE_SIZE, GFP_KERNEL);
+       if (!p->gap_pool_arr) {
+               error = -ENOMEM;
+               goto bad_swap;
+       }
+       p->gaps_tree = RB_ROOT;
+
        mutex_lock(&swapon_mutex);
        spin_lock(&swap_lock);
        if (swap_flags & SWAP_FLAG_PREFER)
--- kernel-power-2.6.28.orig/net/bluetooth/hci_conn.c
+++ kernel-power-2.6.28/net/bluetooth/hci_conn.c
@@ -375,6 +375,9 @@
 
        if (acl->state == BT_CONNECTED &&
                        (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
+               acl->power_save = 1;
+               hci_conn_enter_active_mode(acl);
+
                if (lmp_esco_capable(hdev))
                        hci_setup_sync(sco, acl->handle);
                else
--- kernel-power-2.6.28.orig/net/bluetooth/hci_event.c
+++ kernel-power-2.6.28/net/bluetooth/hci_event.c
@@ -1056,6 +1056,8 @@
        if (conn) {
                if (!ev->status)
                        conn->link_mode |= HCI_LM_AUTH;
+               else
+                       conn->sec_level = BT_SECURITY_LOW;
 
                clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
 
@@ -1709,6 +1711,7 @@
                break;
 
        case 0x1c:      /* SCO interval rejected */
+       case 0x1a:      /* Unsupported Remote Feature */
       case 0x1f:      /* Unspecified error */
                if (conn->out && conn->attempt < 2) {
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |