bfs 350 -> 357, kernel-power v41 -> kernel-bfs
[kernel-bfs] kernel-bfs-2.6.28/debian/patches/nokia-20103103+0m5.diff
--- kernel-power-2.6.28.orig/arch/arm/include/asm/cacheflush.h
+++ kernel-power-2.6.28/arch/arm/include/asm/cacheflush.h
@@ -138,16 +138,16 @@
  *     Please note that the implementation of these, and the required
  *     effects are cache-type (VIVT/VIPT/PIPT) specific.
  *
- *     flush_cache_kern_all()
+ *     flush_kern_all()
  *
  *             Unconditionally clean and invalidate the entire cache.
  *
- *     flush_cache_user_mm(mm)
+ *     flush_user_all()
  *
  *             Clean and invalidate all user space cache entries
  *             before a change of page tables.
  *
- *     flush_cache_user_range(start, end, flags)
+ *     flush_user_range(start, end, flags)
  *
  *             Clean and invalidate a range of cache entries in the
  *             specified address space before a change of page tables.
@@ -163,6 +163,20 @@
  *             - start  - virtual start address
  *             - end    - virtual end address
  *
+ *     coherent_user_range(start, end)
+ *
+ *             Ensure coherency between the Icache and the Dcache in the
+ *             region described by start, end.  If you have non-snooping
+ *             Harvard caches, you need to implement this function.
+ *             - start  - virtual start address
+ *             - end    - virtual end address
+ *
+ *     flush_kern_dcache_area(kaddr, size)
+ *
+ *             Ensure that the data held in page is written back.
+ *             - kaddr  - page address
+ *             - size   - region size
+ *
  *     DMA Cache Coherency
  *     ===================
  *
@@ -375,7 +389,7 @@
  * Harvard caches are synchronised for the user space address range.
  * This is used for the ARM private sys_cacheflush system call.
  */
-#define flush_cache_user_range(vma,start,end) \
+#define flush_cache_user_range(start,end) \
        __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
 
 /*
--- kernel-power-2.6.28.orig/arch/arm/kernel/traps.c
+++ kernel-power-2.6.28/arch/arm/kernel/traps.c
@@ -418,7 +418,9 @@
                if (end > vma->vm_end)
                        end = vma->vm_end;
 
-               flush_cache_user_range(vma, start, end);
+               up_read(&mm->mmap_sem);
+               flush_cache_user_range(start, end);
+               return;
        }
        up_read(&mm->mmap_sem);
 }
--- kernel-power-2.6.28.orig/arch/arm/mach-omap2/smartreflex.c
+++ kernel-power-2.6.28/arch/arm/mach-omap2/smartreflex.c
@@ -890,7 +890,7 @@
                return SR_FAIL;
        }
 
-       if (sr->is_autocomp_active) {
+       if (sr->is_autocomp_active && !sr->is_sr_reset) {
                WARN(1, "SR: Must not transmit VCBYPASS command while SR is "
                     "active");
                return SR_FAIL;
--- kernel-power-2.6.28.orig/arch/arm/mm/fault.c
+++ kernel-power-2.6.28/arch/arm/mm/fault.c
@@ -387,6 +387,9 @@
        if (addr < TASK_SIZE)
                return do_page_fault(addr, fsr, regs);
 
+       if (user_mode(regs))
+               goto bad_area;
+
        index = pgd_index(addr);
 
        /*
@@ -449,7 +452,12 @@
        { do_bad,               SIGILL,  BUS_ADRALN,    "alignment exception"              },
        { do_bad,               SIGKILL, 0,             "terminal exception"               },
        { do_bad,               SIGILL,  BUS_ADRALN,    "alignment exception"              },
+/* Do we need runtime check ? */
+#if __LINUX_ARM_ARCH__ < 6
        { do_bad,               SIGBUS,  0,             "external abort on linefetch"      },
+#else
+       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "I-cache maintenance fault"        },
+#endif
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "section translation fault"        },
        { do_bad,               SIGBUS,  0,             "external abort on linefetch"      },
        { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "page translation fault"           },
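
The traps.c and fault.c hunks together rework the ARM-private sys_cacheflush path: mmap_sem is dropped before the flush (the flush itself may now fault), and on ARMv6+ an I-cache maintenance fault is routed to do_translation_fault so the missing page is simply faulted in. A minimal user-space sketch of the call this serves, assuming an ARM toolchain where __ARM_NR_cacheflush is visible from <asm/unistd.h> (the third argument must currently be 0):

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>

/* Typical consumer: a JIT making freshly written code executable. */
static void jit_cacheflush(void *start, void *end)
{
        syscall(__ARM_NR_cacheflush, start, end, 0);
}
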
--- kernel-power-2.6.28.orig/arch/arm/mm/mmu.c
+++ kernel-power-2.6.28/arch/arm/mm/mmu.c
@@ -953,4 +953,6 @@
                pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
                flush_pmd_entry(pmd);
        }
+
+       local_flush_tlb_all();
 }
--- kernel-power-2.6.28.orig/arch/arm/mm/proc-v6.S
+++ kernel-power-2.6.28/arch/arm/mm/proc-v6.S
@@ -56,8 +56,6 @@
  *     to what would be the reset vector.
  *
  *     - loc   - location to jump to for soft reset
- *
- *     It is assumed that:
  */
        .align  5
 ENTRY(cpu_v6_reset)
--- kernel-power-2.6.28.orig/arch/arm/mm/proc-v7.S
+++ kernel-power-2.6.28/arch/arm/mm/proc-v7.S
@@ -28,7 +28,14 @@
 ENDPROC(cpu_v7_proc_init)
 
 ENTRY(cpu_v7_proc_fin)
-       mov     pc, lr
+       stmfd   sp!, {lr}
+       cpsid   if                              @ disable interrupts
+       bl      v7_flush_kern_cache_all
+       mrc     p15, 0, r0, c1, c0, 0           @ ctrl register
+       bic     r0, r0, #0x1000                 @ ...i............
+       bic     r0, r0, #0x0006                 @ .............ca.
+       mcr     p15, 0, r0, c1, c0, 0           @ disable caches
+       ldmfd   sp!, {pc}
 ENDPROC(cpu_v7_proc_fin)
 
 /*
@@ -39,8 +46,6 @@
  *     to what would be the reset vector.
  *
  *     - loc   - location to jump to for soft reset
- *
- *     It is assumed that:
  */
        .align  5
 ENTRY(cpu_v7_reset)
--- kernel-power-2.6.28.orig/block/cfq-iosched.c
+++ kernel-power-2.6.28/block/cfq-iosched.c
@@ -84,6 +84,11 @@
         */
        struct cfq_rb_root service_tree;
        unsigned int busy_queues;
+       /*
+        * Used to track any pending rt requests so we can pre-empt current
+        * non-RT cfqq in service when this value is non-zero.
+        */
+       unsigned int busy_rt_queues;
 
        int rq_in_driver;
        int sync_flight;
@@ -155,6 +160,7 @@
 
        unsigned long slice_end;
        long slice_resid;
+       unsigned int slice_dispatch;
 
        /* pending metadata requests */
        int meta_pending;
@@ -171,13 +177,12 @@
 enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
        CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
+       CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
        CFQ_CFQQ_FLAG_must_alloc,       /* must be allowed rq alloc */
        CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
-       CFQ_CFQQ_FLAG_must_dispatch,    /* must dispatch, even if expired */
        CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
        CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
        CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
-       CFQ_CFQQ_FLAG_queue_new,        /* queue never been serviced */
        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
 };
@@ -198,13 +203,12 @@
 
 CFQ_CFQQ_FNS(on_rr);
 CFQ_CFQQ_FNS(wait_request);
+CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(must_alloc);
 CFQ_CFQQ_FNS(must_alloc_slice);
-CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(fifo_expire);
 CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
-CFQ_CFQQ_FNS(queue_new);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 #undef CFQ_CFQQ_FNS
@@ -562,6 +566,8 @@
        BUG_ON(cfq_cfqq_on_rr(cfqq));
        cfq_mark_cfqq_on_rr(cfqq);
        cfqd->busy_queues++;
+       if (cfq_class_rt(cfqq))
+               cfqd->busy_rt_queues++;
 
        cfq_resort_rr_list(cfqd, cfqq);
 }
@@ -581,6 +587,8 @@
 
        BUG_ON(!cfqd->busy_queues);
        cfqd->busy_queues--;
+       if (cfq_class_rt(cfqq))
+               cfqd->busy_rt_queues--;
 }
 
 /*
@@ -765,10 +773,15 @@
        if (cfqq) {
                cfq_log_cfqq(cfqd, cfqq, "set_active");
                cfqq->slice_end = 0;
+               cfqq->slice_dispatch = 0;
+
+               cfq_clear_cfqq_wait_request(cfqq);
+               cfq_clear_cfqq_must_dispatch(cfqq);
                cfq_clear_cfqq_must_alloc_slice(cfqq);
                cfq_clear_cfqq_fifo_expire(cfqq);
                cfq_mark_cfqq_slice_new(cfqq);
-               cfq_clear_cfqq_queue_new(cfqq);
+
+               del_timer(&cfqd->idle_slice_timer);
        }
 
        cfqd->active_queue = cfqq;
@@ -786,7 +799,6 @@
        if (cfq_cfqq_wait_request(cfqq))
                del_timer(&cfqd->idle_slice_timer);
 
-       cfq_clear_cfqq_must_dispatch(cfqq);
        cfq_clear_cfqq_wait_request(cfqq);
 
        /*
@@ -915,7 +927,6 @@
            (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
                return;
 
-       cfq_mark_cfqq_must_dispatch(cfqq);
        cfq_mark_cfqq_wait_request(cfqq);
 
        /*
@@ -1001,10 +1012,24 @@
        /*
         * The active queue has run out of time, expire it and select new.
         */
-       if (cfq_slice_used(cfqq))
+       if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
                goto expire;
 
        /*
+        * If we have a RT cfqq waiting, then we pre-empt the current non-rt
+        * cfqq.
+        */
+       if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
+               /*
+                * We simulate this as cfqq timed out so that it gets to bank
+                * the remaining of its time slice.
+                */
+               cfq_log_cfqq(cfqd, cfqq, "preempt");
+               cfq_slice_expired(cfqd, 1);
+               goto new_queue;
+       }
+
+       /*
         * The active queue has requests and isn't expired, allow it to
         * dispatch.
         */
@@ -1030,59 +1055,6 @@
        return cfqq;
 }
 
-/*
- * Dispatch some requests from cfqq, moving them to the request queue
- * dispatch list.
- */
-static int
-__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                       int max_dispatch)
-{
-       int dispatched = 0;
-
-       BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
-       do {
-               struct request *rq;
-
-               /*
-                * follow expired path, else get first next available
-                */
-               rq = cfq_check_fifo(cfqq);
-               if (rq == NULL)
-                       rq = cfqq->next_rq;
-
-               /*
-                * finally, insert request into driver dispatch list
-                */
-               cfq_dispatch_insert(cfqd->queue, rq);
-
-               dispatched++;
-
-               if (!cfqd->active_cic) {
-                       atomic_inc(&RQ_CIC(rq)->ioc->refcount);
-                       cfqd->active_cic = RQ_CIC(rq);
-               }
-
-               if (RB_EMPTY_ROOT(&cfqq->sort_list))
-                       break;
-
-       } while (dispatched < max_dispatch);
-
-       /*
-        * expire an async queue immediately if it has used up its slice. idle
-        * queue always expire after 1 dispatch round.
-        */
-       if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
-           dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-           cfq_class_idle(cfqq))) {
-               cfqq->slice_end = jiffies + 1;
-               cfq_slice_expired(cfqd, 0);
-       }
-
-       return dispatched;
-}
-
 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
 {
        int dispatched = 0;
@@ -1116,11 +1088,45 @@
        return dispatched;
 }
 
+/*
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       struct request *rq;
+
+       BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+       /*
+        * follow expired path, else get first next available
+        */
+       rq = cfq_check_fifo(cfqq);
+       if (!rq)
+               rq = cfqq->next_rq;
+
+       /*
+        * insert request into driver dispatch list
+        */
+       cfq_dispatch_insert(cfqd->queue, rq);
+
+       if (!cfqd->active_cic) {
+               struct cfq_io_context *cic = RQ_CIC(rq);
+
+               atomic_inc(&cic->ioc->refcount);
+               cfqd->active_cic = cic;
+       }
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
 static int cfq_dispatch_requests(struct request_queue *q, int force)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq;
-       int dispatched;
+       unsigned int max_dispatch;
 
        if (!cfqd->busy_queues)
                return 0;
@@ -1128,33 +1134,63 @@
        if (unlikely(force))
                return cfq_forced_dispatch(cfqd);
 
-       dispatched = 0;
-       while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
-               int max_dispatch;
+       cfqq = cfq_select_queue(cfqd);
+       if (!cfqq)
+               return 0;
+
+       /*
+        * If this is an async queue and we have sync IO in flight, let it wait
+        */
+       if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+               return 0;
+
+       max_dispatch = cfqd->cfq_quantum;
+       if (cfq_class_idle(cfqq))
+               max_dispatch = 1;
 
-               max_dispatch = cfqd->cfq_quantum;
+       /*
+        * Does this cfqq already have too much IO in flight?
+        */
+       if (cfqq->dispatched >= max_dispatch) {
+               /*
+                * idle queue must always only have a single IO in flight
+                */
                if (cfq_class_idle(cfqq))
-                       max_dispatch = 1;
+                       return 0;
 
-               if (cfqq->dispatched >= max_dispatch) {
-                       if (cfqd->busy_queues > 1)
-                               break;
-                       if (cfqq->dispatched >= 4 * max_dispatch)
-                               break;
-               }
+               /*
+                * We have other queues, don't allow more IO from this one
+                */
+               if (cfqd->busy_queues > 1)
+                       return 0;
 
-               if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-                       break;
+               /*
+                * we are the only queue, allow up to 4 times of 'quantum'
+                */
+               if (cfqq->dispatched >= 4 * max_dispatch)
+                       return 0;
+       }
 
-               cfq_clear_cfqq_must_dispatch(cfqq);
-               cfq_clear_cfqq_wait_request(cfqq);
-               del_timer(&cfqd->idle_slice_timer);
+       /*
+        * Dispatch a request from this cfqq
+        */
+       cfq_dispatch_request(cfqd, cfqq);
+       cfqq->slice_dispatch++;
+       cfq_clear_cfqq_must_dispatch(cfqq);
 
-               dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+       /*
+        * expire an async queue immediately if it has used up its slice. idle
+        * queue always expire after 1 dispatch round.
+        */
+       if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
+           cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+           cfq_class_idle(cfqq))) {
+               cfqq->slice_end = jiffies + 1;
+               cfq_slice_expired(cfqd, 0);
        }
 
-       cfq_log(cfqd, "dispatched=%d", dispatched);
-       return dispatched;
+       cfq_log(cfqd, "dispatched a request");
+       return 1;
 }
 
 /*
@@ -1318,7 +1354,15 @@
                unsigned long flags;
 
                spin_lock_irqsave(q->queue_lock, flags);
-               __cfq_exit_single_io_context(cfqd, cic);
+
+               /*
+                * Ensure we get a fresh copy of the ->key to prevent
+                * race between exiting task and queue
+                */
+               smp_read_barrier_depends();
+               if (cic->key)
+                       __cfq_exit_single_io_context(cfqd, cic);
+
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
 }
@@ -1472,7 +1516,6 @@
                cfqq->cfqd = cfqd;
 
                cfq_mark_cfqq_prio_changed(cfqq);
-               cfq_mark_cfqq_queue_new(cfqq);
 
                cfq_init_prio_data(cfqq, ioc);
 
@@ -1797,6 +1840,12 @@
        if (rq_is_meta(rq) && !cfqq->meta_pending)
                return 1;
 
+       /*
+        * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
+        */
+       if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
+               return 1;
+
        if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
                return 0;
 
@@ -1853,23 +1902,28 @@
 
        if (cfqq == cfqd->active_queue) {
                /*
-                * if we are waiting for a request for this queue, let it rip
-                * immediately and flag that we must not expire this queue
-                * just now
+                * Remember that we saw a request from this process, but
+                * don't start queuing just yet. Otherwise we risk seeing lots
+                * of tiny requests, because we disrupt the normal plugging
+                * and merging. If the request is already larger than a single
+                * page, let it rip immediately. For that case we assume that
+                * merging is already done.
                 */
                if (cfq_cfqq_wait_request(cfqq)) {
+                       if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE) {
+                               del_timer(&cfqd->idle_slice_timer);
+                               blk_start_queueing(cfqd->queue);
+                       }
+                       cfq_mark_cfqq_must_dispatch(cfqq);
-                       del_timer(&cfqd->idle_slice_timer);
-                       blk_start_queueing(cfqd->queue);
                }
        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
                /*
                 * not the active queue - expire current slice if it is
                 * idle and has expired it's mean thinktime or this new queue
-                * has some old slice time left and is of higher priority
+                * has some old slice time left and is of higher priority or
+                * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               cfq_mark_cfqq_must_dispatch(cfqq);
                blk_start_queueing(cfqd->queue);
        }
 }
@@ -2129,6 +2183,12 @@
                timed_out = 0;
 
                /*
+                * We saw a request before the queue expired, let it through
+                */
+               if (cfq_cfqq_must_dispatch(cfqq))
+                       goto out_kick;
+
+               /*
                 * expired
                 */
                if (cfq_slice_used(cfqq))
@@ -2144,10 +2204,8 @@
                /*
                 * not expired and it has a request pending, let it dispatch
                 */
-               if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
-                       cfq_mark_cfqq_must_dispatch(cfqq);
+               if (!RB_EMPTY_ROOT(&cfqq->sort_list))
                        goto out_kick;
-               }
        }
 expire:
        cfq_slice_expired(cfqd, timed_out);
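
The cfq-iosched.c changes backport two upstream reworks: dispatch now hands the block layer one request per elevator call (cfq_dispatch_request plus the slice_dispatch counter replace the old __cfq_dispatch_requests loop), and a queued RT request preempts an active best-effort queue via busy_rt_queues. A process ends up in an RT cfqq through the I/O-priority syscall; a hedged user-space sketch, with the IOPRIO_* constants written out because older libcs ship no header for them:

#include <unistd.h>
#include <sys/syscall.h>

#define IOPRIO_CLASS_SHIFT      13
#define IOPRIO_CLASS_RT         1
#define IOPRIO_WHO_PROCESS      1

/* Give the calling process RT I/O priority, level 0 (highest). */
static int set_rt_ioprio(void)
{
        int ioprio = (IOPRIO_CLASS_RT << IOPRIO_CLASS_SHIFT) | 0;

        return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, ioprio);
}
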
--- kernel-power-2.6.28.orig/drivers/dsp/bridge/rmgr/drv.c
+++ kernel-power-2.6.28/drivers/dsp/bridge/rmgr/drv.c
@@ -517,11 +517,12 @@
                pDMMRes = pDMMList;
                pDMMList = pDMMList->next;
                if (pDMMRes->dmmAllocated) {
-                       status = PROC_UnMap(pDMMRes->hProcessor,
-                                (void *)pDMMRes->ulDSPResAddr, pCtxt);
-                       status = PROC_UnReserveMemory(pDMMRes->hProcessor,
-                                (void *)pDMMRes->ulDSPResAddr);
-                       pDMMRes->dmmAllocated = 0;
+                       /* PROC_UnMap frees pDMMRes */
+                       void *processor = pDMMRes->hProcessor;
+                       void *map_addr = (void*)pDMMRes->ulDSPAddr;
+                       void *rsv_addr = (void*)pDMMRes->ulDSPResAddr;
+                       status = PROC_UnMap(processor, map_addr, pCtxt);
+                       status = PROC_UnReserveMemory(processor, rsv_addr);
                }
        }
        return status;
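
The drv.c hunk fixes a use-after-free: PROC_UnMap releases the pDMMRes node, so every field still needed is copied into locals before the call (and the later write to pDMMRes->dmmAllocated is dropped). The same pattern in isolation, with hypothetical names:

struct res_node {
        struct res_node *next;
        void *handle;
        unsigned long map_addr, rsv_addr;
};

/* Assume unmap() may free the node whose fields it was given. */
extern int unmap(void *handle, unsigned long addr);
extern int unreserve(void *handle, unsigned long addr);

static void release_one(struct res_node *node)
{
        void *handle = node->handle;    /* copy fields before the call */
        unsigned long rsv = node->rsv_addr;

        unmap(handle, node->map_addr);  /* may free node */
        unreserve(handle, rsv);         /* node is not touched again */
}
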
--- kernel-power-2.6.28.orig/drivers/dsp/bridge/rmgr/proc.c
+++ kernel-power-2.6.28/drivers/dsp/bridge/rmgr/proc.c
@@ -750,6 +750,7 @@
                        break;
 
                start = vma->vm_end;
+               len -= size;
        }
 
        if (!vma)
--- kernel-power-2.6.28.orig/drivers/i2c/chips/lis302dl.c
+++ kernel-power-2.6.28/drivers/i2c/chips/lis302dl.c
@@ -44,6 +44,7 @@
 #      define LIS302_CTRL1_Y           (1 << 1)
 #      define LIS302_CTRL1_X           (1 << 0)
 #define LIS302_CTRL_2                  0x21
+#      define LIS302_CTRL2_BOOT        (1 << 6)
 #define LIS302_CTRL_3                  0x22
 #      define  LIS302_CTRL3_GND        0x00
 #      define  LIS302_CTRL3_FF_WU_1    0x01
@@ -161,8 +162,13 @@
        if (ret < 0)
                goto out;
 
-       /* REG 2 */
-       /* Control High Pass filter selection. not used */
+       /* REG 2
+        * Boot is used to refresh internal registers
+        * Control High Pass filter selection. not used
+        */
+       ret = lis302dl_write(c, LIS302_CTRL_2, LIS302_CTRL2_BOOT);
+       if (ret < 0)
+               goto out;
 
        /* REG 3
        * Interrupt CTRL register. One interrupt pin is used for
--- kernel-power-2.6.28.orig/drivers/leds/leds-lp5523.c
+++ kernel-power-2.6.28/drivers/leds/leds-lp5523.c
@@ -32,6 +32,7 @@
 #include <linux/wait.h>
 #include <linux/leds.h>
 #include <linux/leds-lp5523.h>
+#include <linux/workqueue.h>
 
 #define LP5523_DRIVER_NAME             "lp5523"
 #define LP5523_REG_ENABLE              0x00
@@ -120,6 +121,8 @@
        u8                      led_nr;
        u8                      led_current;
        struct led_classdev     cdev;
+       struct work_struct brightness_work;
+       u8                      brightness;
 };
 
 struct lp5523_chip {
@@ -161,6 +164,8 @@
 static void lp5523_work(struct work_struct  *work);
 static irqreturn_t lp5523_irq(int irq, void *_chip);
 
+static void lp5523_led_brightness_work(struct work_struct *work);
+
 
 static int lp5523_write(struct i2c_client *client, u8 reg, u8 value)
 {
@@ -476,6 +481,16 @@
                             enum led_brightness brightness)
 {
        struct lp5523_led *led = cdev_to_led(cdev);
+       led->brightness = (u8)brightness;
+
+       schedule_work(&led->brightness_work);
+}
+
+static void lp5523_led_brightness_work(struct work_struct *work)
+{
+       struct lp5523_led *led = container_of(work,
+                                             struct lp5523_led,
+                                             brightness_work);
        struct lp5523_chip *chip = led_to_lp5523(led);
        struct i2c_client *client = chip->client;
 
@@ -483,7 +498,7 @@
 
        lp5523_write(client,
                     LP5523_REG_LED_PWM_BASE + led->led_nr,
-                    (u8)brightness);
+                    led->brightness);
 
        mutex_unlock(&chip->lock);
 }
@@ -907,6 +922,8 @@
                        dev_err(&client->dev, "error initializing leds\n");
                        goto fail2;
                }
+               INIT_WORK(&(chip->leds[i].brightness_work),
+                         lp5523_led_brightness_work);
        }
 
        ret = lp5523_register_sysfs(client);
@@ -916,8 +933,10 @@
        }
        return ret;
 fail2:
-       for (i = 0; i < pdata->num_leds; i++)
+       for (i = 0; i < pdata->num_leds; i++) {
                led_classdev_unregister(&chip->leds[i].cdev);
+               cancel_work_sync(&chip->leds[i].brightness_work);
+               }
 
 fail1:
        kfree(chip);
@@ -931,8 +950,10 @@
 
        lp5523_unregister_sysfs(client);
 
-       for (i = 0; i < chip->num_leds; i++)
+       for (i = 0; i < chip->num_leds; i++) {
                led_classdev_unregister(&chip->leds[i].cdev);
+               cancel_work_sync(&chip->leds[i].brightness_work);
+               }
 
        kfree(chip);
 
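leds-lp5523.c defers the brightness write to a workqueue: the led_classdev brightness_set hook can be invoked from atomic context, while the LP5523 sits on I2C and the register write can sleep. The value is cached in the struct and the bus access runs from process context, with matching INIT_WORK/cancel_work_sync in the probe and remove paths. The pattern in miniature (hypothetical demo_led type):

#include <linux/leds.h>
#include <linux/workqueue.h>

struct demo_led {
        struct led_classdev cdev;
        struct work_struct work;
        u8 brightness;          /* cached value, consumed by the work item */
};

static void demo_set(struct led_classdev *cdev, enum led_brightness value)
{
        struct demo_led *led = container_of(cdev, struct demo_led, cdev);

        led->brightness = value;        /* no bus access here */
        schedule_work(&led->work);      /* safe from atomic context */
}
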
--- kernel-power-2.6.28.orig/drivers/media/radio/radio-si4713.c
+++ kernel-power-2.6.28/drivers/media/radio/radio-si4713.c
@@ -54,6 +54,25 @@
 /* module parameters */
 static int radio_nr = -1;      /* radio device minor (-1 ==> auto assign) */
 
+/* properties lock for write operations */
+static int config_locked;
+
+/* saved power levels */
+static unsigned int max_pl;
+static unsigned int min_pl;
+
+/* structure for pid registration */
+struct pid_list {
+       pid_t pid;
+       struct list_head plist;
+};
+
+#define APP_MAX_NUM    2
+
+static int pid_count;
+static LIST_HEAD(pid_list_head);
+static struct si4713_device *si4713_dev;
+
 /*
  * Sysfs properties
  * Read and write functions
@@ -167,6 +186,37 @@
                                        si4713_##prop##_write);
 
 /*
+ * Config lock property
+ */
+static ssize_t si4713_lock_write(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf,
+                               size_t count)
+{
+       int l;
+
+       if (config_locked)
+               return -EPERM;
+
+       sscanf(buf, "%d", &l);
+
+       if (l != 0)
+               config_locked = 1;
+
+       return count;
+}
+
+static ssize_t si4713_lock_read(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       return sprintf(buf, "%d\n", config_locked);
+}
+
+static DEVICE_ATTR(lock, S_IRUGO | S_IWUSR, si4713_lock_read,
+                       si4713_lock_write);
+
+/*
  * Power level property
  */
 /* power_level (rw) 88 - 115 or 0 */
@@ -179,6 +229,9 @@
        unsigned int p;
        int rval, pl;
 
+       if (config_locked)
+               return -EPERM;
+
        if (!sdev) {
                rval = -ENODEV;
                goto exit;
@@ -320,6 +373,7 @@
                        value > MAX_TONE_OFF_TIME)
 
 static struct attribute *attrs[] = {
+       &dev_attr_lock.attr,
        &dev_attr_power_level.attr,
        &dev_attr_antenna_capacitor.attr,
        &dev_attr_rds_pi.attr,
@@ -366,13 +420,118 @@
        return IRQ_HANDLED;
 }
 
+static int register_pid(pid_t pid)
+{
+       struct pid_list *pitem;
+
+       list_for_each_entry(pitem, &pid_list_head, plist) {
+               if (pitem->pid == pid)
+                       return -EINVAL;
+       }
+
+       pitem = kmalloc(sizeof(struct pid_list), GFP_KERNEL);
+
+       if (!pitem)
+               return -ENOMEM;
+
+       pitem->pid = pid;
+
+       list_add(&(pitem->plist), &pid_list_head);
+       pid_count++;
+
+       return 0;
+}
+
+static int unregister_pid(pid_t pid)
+{
+       struct pid_list *pitem, *n;
+
+       list_for_each_entry_safe(pitem, n, &pid_list_head, plist) {
+               if (pitem->pid == pid) {
+                       list_del(&(pitem->plist));
+                       pid_count--;
+
+                       kfree(pitem);
+
+                       return 0;
+               }
+       }
+       return -EINVAL;
+}
+
+static int si4713_priv_ioctl(struct inode *inode, struct file *file,
+               unsigned int cmd, unsigned long arg)
+{
+       unsigned int pow;
+       int pl, rval;
+
+       if (cmd != LOCK_LOW_POWER && cmd != RELEASE_LOW_POWER)
+               return video_ioctl2(inode, file, cmd, arg);
+
+       pl = si4713_get_power_level(si4713_dev);
+
+       if (pl < 0) {
+               rval = pl;
+               goto exit;
+       }
+
+       if (copy_from_user(&pow, (void __user *)arg, sizeof(pow))) {
+               rval = -EFAULT;
+               goto exit;
+       }
+
+       if (cmd == LOCK_LOW_POWER) {
+
+               if (pid_count == APP_MAX_NUM) {
+                       rval = -EPERM;
+                       goto exit;
+               }
+
+               if (pid_count == 0) {
+                       if (pow > pl) {
+                               rval = -EINVAL;
+                               goto exit;
+                       } else {
+                               /* Set max possible power level */
+                               max_pl = pl;
+                               min_pl = pow;
+                       }
+               }
+
+               rval = register_pid(current->pid);
+
+               if (rval)
+                       goto exit;
+
+               /* Lower min power level if asked */
+               if (pow < min_pl)
+                       min_pl = pow;
+               else
+                       pow = min_pl;
+
+       } else { /* RELEASE_LOW_POWER */
+               rval = unregister_pid(current->pid);
+
+               if (rval)
+                       goto exit;
+
+               if (pid_count == 0) {
+                       if (pow > max_pl)
+                               pow = max_pl;
+               }
+       }
+       rval = si4713_set_power_level(si4713_dev, pow);
+exit:
+       return rval;
+}
+
 /*
  * si4713_fops - file operations interface
  */
 static const struct file_operations si4713_fops = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
-       .ioctl          = video_ioctl2,
+       .ioctl          = si4713_priv_ioctl,
        .compat_ioctl   = v4l_compat_ioctl32,
 };
 
@@ -747,6 +906,9 @@
                goto free_sysfs;
        }
 
+       /* save to global pointer for it to be accessible from ioctl() call */
+       si4713_dev = sdev;
+
        return 0;
 
 free_sysfs:
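
The new si4713_priv_ioctl intercepts two private commands (defined in radio-si4713.h just below) and hands everything else to video_ioctl2. Up to APP_MAX_NUM processes may hold the low-power lock at once; the lowest requested level wins, and the saved maximum is restored when the last holder releases. From user space, roughly (the device node name is an assumption):

#include <fcntl.h>
#include <sys/ioctl.h>
#include "radio-si4713.h"       /* LOCK_LOW_POWER / RELEASE_LOW_POWER */

int main(void)
{
        int fd = open("/dev/radio0", O_RDWR);   /* node name assumed */
        unsigned int level = 90;                /* requested cap, 88..115 */

        ioctl(fd, LOCK_LOW_POWER, &level);      /* register, lower the cap */
        /* ... transmit at reduced power ... */
        ioctl(fd, RELEASE_LOW_POWER, &level);   /* unregister, restore max */
        return 0;
}
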
--- kernel-power-2.6.28.orig/drivers/media/radio/radio-si4713.h
+++ kernel-power-2.6.28/drivers/media/radio/radio-si4713.h
@@ -21,6 +21,9 @@
 #define SI4713_I2C_ADDR_BUSEN_HIGH     0x63
 #define SI4713_I2C_ADDR_BUSEN_LOW      0x11
 
+#define LOCK_LOW_POWER         _IOW('v', BASE_VIDIOC_PRIVATE + 0, unsigned int)
+#define RELEASE_LOW_POWER      _IOW('v', BASE_VIDIOC_PRIVATE + 1, unsigned int)
+
 /*
  * Platform dependent definition
  */
--- kernel-power-2.6.28.orig/drivers/media/video/omap34xxcam.c
+++ kernel-power-2.6.28/drivers/media/video/omap34xxcam.c
@@ -1833,6 +1833,7 @@
        struct omap34xxcam_videodev *vdev = fh->vdev;
        struct device *isp = vdev->cam->isp;
        int i;
+       int streamoff = 0;
 
        if (omap34xxcam_daemon_release(vdev, file))
                goto daemon_out;
@@ -1844,6 +1845,7 @@
                omap34xxcam_slave_power_set(vdev, V4L2_POWER_STANDBY,
                                            OMAP34XXCAM_SLAVE_POWER_ALL);
                vdev->streaming = NULL;
+               streamoff = 1;
        }
 
        if (atomic_dec_return(&vdev->users) == 0) {
@@ -1853,6 +1855,10 @@
        }
        mutex_unlock(&vdev->mutex);
 
+       if (streamoff)
+               omap34xxcam_daemon_req_hw_reconfig(
+                       vdev, OMAP34XXCAM_DAEMON_HW_RECONFIG_STREAMOFF);
+
 daemon_out:
        file->private_data = NULL;
 
--- kernel-power-2.6.28.orig/drivers/mmc/host/omap_hsmmc.c
+++ kernel-power-2.6.28/drivers/mmc/host/omap_hsmmc.c
@@ -115,6 +115,7 @@
 /* Timeouts for entering power saving states on inactivity, msec */
 #define OMAP_MMC_DISABLED_TIMEOUT      100
 #define OMAP_MMC_SLEEP_TIMEOUT         1000
+#define OMAP_MMC_OFF_NOSLP_TIMEOUT     3000
 #define OMAP_MMC_OFF_TIMEOUT           8000
 
 /*
@@ -1249,21 +1250,21 @@
 
 /*
  * Dynamic power saving handling, FSM:
- *   ENABLED -> DISABLED -> CARDSLEEP / REGSLEEP -> OFF
- *     ^___________|          |                      |
- *     |______________________|______________________|
+ *   ENABLED -> DISABLED -> EXTDISABLED / CARDSLEEP / REGSLEEP -> OFF
+ *     ^___________|                        |                      |
+ *     |____________________________________|______________________|
  *
- * ENABLED:   mmc host is fully functional
- * DISABLED:  fclk is off
- * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep
- * REGSLEEP:  fclk is off, voltage regulator is asleep
- * OFF:       fclk is off, voltage regulator is off
+ * ENABLED:       mmc host is fully functional
+ * (EXT)DISABLED: fclk is off
+ * CARDSLEEP:     fclk is off, card is asleep, voltage regulator is asleep
+ * REGSLEEP:      fclk is off, voltage regulator is asleep
+ * OFF:           fclk is off, voltage regulator is off
  *
  * Transition handlers return the timeout for the next state transition
  * or negative error.
  */
 
-enum {ENABLED = 0, DISABLED, CARDSLEEP, REGSLEEP, OFF};
+enum {ENABLED = 0, DISABLED, EXTDISABLED, CARDSLEEP, REGSLEEP, OFF};
 
 /* Handler for [ENABLED -> DISABLED] transition */
 static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host)
@@ -1300,7 +1301,21 @@
        return 1;
 }
 
-/* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */
+/* Big SD cards (16GiB) are prohibited from
+   switching voltage regulator to asleep
+   because of high current consumption */
+static int omap_hsmmc_support_sleep(struct mmc_host *mmc)
+{
+       if (!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
+           ((u64)mmc->card->csd.capacity << mmc->card->csd.read_blkbits) >
+           14ULL * 1024 * 1024 * 1024) {
+               return 0;
+       }
+
+       return 1;
+}
+
+/* Handler for [DISABLED -> EXTDISABLED / REGSLEEP / CARDSLEEP] transition */
 static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host)
 {
        int err, new_state, sleep;
@@ -1319,12 +1334,12 @@
                }
                new_state = CARDSLEEP;
        } else {
-               new_state = REGSLEEP;
+               new_state = omap_hsmmc_support_sleep(host->mmc) ? REGSLEEP : EXTDISABLED;
        }
 
        sleep = omap_hsmmc_full_sleep(host->mmc->card) &&
                (new_state == CARDSLEEP);
-       if (mmc_slot(host).set_sleep)
+       if (mmc_slot(host).set_sleep && new_state != EXTDISABLED)
                mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0,
                                        sleep);
        /* FIXME: turn off bus power and perhaps interrupts too */
@@ -1334,18 +1349,20 @@
        mmc_release_host(host->mmc);
 
        dev_dbg(mmc_dev(host->mmc), "DISABLED -> %s\n",
-               host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
+               host->dpm_state == CARDSLEEP ? "CARDSLEEP" :
+               host->dpm_state == REGSLEEP ?  "REGSLEEP" : "EXTDISABLED");
 
        if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
            mmc_slot(host).card_detect ||
            (mmc_slot(host).get_cover_state &&
             mmc_slot(host).get_cover_state(host->dev, host->slot_id)))
-               return msecs_to_jiffies(OMAP_MMC_OFF_TIMEOUT);
+               return msecs_to_jiffies(new_state == EXTDISABLED ?
+                      OMAP_MMC_OFF_NOSLP_TIMEOUT : OMAP_MMC_OFF_TIMEOUT);
 
        return 0;
 }
 
-/* Handler for [REGSLEEP / CARDSLEEP -> OFF] transition */
+/* Handler for [EXTDISABLED / REGSLEEP / CARDSLEEP -> OFF] transition */
 static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host)
 {
        if (!mmc_try_claim_host(host->mmc))
@@ -1364,7 +1381,8 @@
        host->power_mode = MMC_POWER_OFF;
 
        dev_dbg(mmc_dev(host->mmc), "%s -> OFF\n",
-               host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
+               host->dpm_state == CARDSLEEP ? "CARDSLEEP" :
+               host->dpm_state == REGSLEEP ?  "REGSLEEP" : "EXTDISABLED");
 
        host->dpm_state = OFF;
 
@@ -1405,14 +1423,15 @@
        omap_hsmmc_context_restore(host);
        asleep = omap_hsmmc_full_sleep(host->mmc->card) &&
                (host->dpm_state == CARDSLEEP);
-       if (mmc_slot(host).set_sleep)
+       if (mmc_slot(host).set_sleep && host->dpm_state != EXTDISABLED)
                mmc_slot(host).set_sleep(host->dev, host->slot_id, 0,
                                        host->vdd, asleep);
        if (mmc_card_can_sleep(host->mmc))
                mmc_card_awake(host->mmc);
 
        dev_dbg(mmc_dev(host->mmc), "%s -> ENABLED\n",
-               host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
+               host->dpm_state == CARDSLEEP ? "CARDSLEEP" :
+               host->dpm_state == REGSLEEP ?  "REGSLEEP" : "EXTDISABLED");
 
        if (host->pdata->set_pm_constraints)
                host->pdata->set_pm_constraints(host->dev, 1);
@@ -1454,6 +1473,7 @@
        switch (host->dpm_state) {
        case DISABLED:
                return omap_hsmmc_disabled_to_enabled(host);
+       case EXTDISABLED:
        case CARDSLEEP:
        case REGSLEEP:
                return omap_hsmmc_sleep_to_enabled(host);
@@ -1484,6 +1504,7 @@
        }
        case DISABLED:
                return omap_hsmmc_disabled_to_sleep(host);
+       case EXTDISABLED:
        case CARDSLEEP:
        case REGSLEEP:
                return omap_hsmmc_sleep_to_off(host);
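
omap_hsmmc_support_sleep needs the u64 cast because csd.capacity is a 32-bit block count: shifted by read_blkbits in 32-bit arithmetic, anything past 4 GiB wraps and a 16 GiB card would wrongly look small. Worked numbers (illustrative, not taken from the patch):

/* An SDHC card reports capacity in 512-byte sectors (read_blkbits == 9).
 * For a nominal 16 GiB card, capacity is roughly 31260672 sectors. */
unsigned int capacity = 31260672;      /* sectors */
unsigned int read_blkbits = 9;

unsigned long long bytes = (unsigned long long)capacity << read_blkbits;
/* bytes == 16005464064, above the 14 GiB cut-off (15032385536), so no
 * regulator sleep. Without the cast the shift wraps modulo 2^32 to
 * 3120562176 and the big card would wrongly pass the check. */
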
--- kernel-power-2.6.28.orig/drivers/net/wireless/wl12xx/wl1251_acx.c
+++ kernel-power-2.6.28/drivers/net/wireless/wl12xx/wl1251_acx.c
@@ -910,7 +910,7 @@
        }
 
        *mactime = tsf_info->current_tsf_lsb |
-               (tsf_info->current_tsf_msb << 31);
+               ((unsigned long long) tsf_info->current_tsf_msb << 32);
 
 out:
        kfree(tsf_info);
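
The wl1251 fix is a classic 64-bit composition bug: the MSB half was shifted by 31 (off by one), and shifting a 32-bit value by 32 would be undefined anyway. Widening first makes the composition well defined:

#include <stdint.h>

uint32_t lsb = 0x89abcdefu;
uint32_t msb = 0x01234567u;

/* Correct: 0x0123456789abcdef. With "msb << 32" on a 32-bit type the
 * shift is undefined; with "msb << 31" the two halves overlap. */
uint64_t tsf = ((uint64_t)msb << 32) | lsb;
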
--- kernel-power-2.6.28.orig/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ kernel-power-2.6.28/drivers/net/wireless/wl12xx/wl1251_cmd.c
@@ -242,7 +242,7 @@
        if (ret < 0) {
                wl1251_error("tx %s cmd for channel %d failed",
                             enable ? "start" : "stop", channel);
-               return ret;
+               goto out;
        }
 
        wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
--- kernel-power-2.6.28.orig/drivers/usb/musb/musb_core.c
+++ kernel-power-2.6.28/drivers/usb/musb/musb_core.c
@@ -297,28 +297,23 @@
                        break;
        }
 
-       if (vdat) {
-               /* REVISIT: This code works only with dedicated chargers!
-                * When support for HOST/HUB chargers is added, don't
-                * forget this.
-                */
+       /* enable interrupts */
+       musb_writeb(musb->mregs, MUSB_INTRUSBE, ctx.intrusbe);
+
+       /* Make sure the communication starts normally */
+       r = musb_readb(musb->mregs, MUSB_POWER);
+       musb_writeb(musb->mregs, MUSB_POWER,
+                       r | MUSB_POWER_RESUME);
+       msleep(10);
+       musb_writeb(musb->mregs, MUSB_POWER,
+                       r & ~MUSB_POWER_RESUME);
+       if (vdat && musb->xceiv->state != OTG_STATE_B_IDLE) {
                musb_stop(musb);
                /* Regulators off */
                otg_set_suspend(musb->xceiv, 1);
-               musb->is_charger = 1;
-       } else {
-               /* enable interrupts */
-               musb_writeb(musb->mregs, MUSB_INTRUSBE, ctx.intrusbe);
-
-               /* Make sure the communication starts normally */
-               r = musb_readb(musb->mregs, MUSB_POWER);
-               musb_writeb(musb->mregs, MUSB_POWER,
-                               r | MUSB_POWER_RESUME);
-               msleep(10);
-               musb_writeb(musb->mregs, MUSB_POWER,
-                               r & ~MUSB_POWER_RESUME);
        }
 
+       musb->is_charger = vdat;
        check_charger = 0;
 
        return vdat;
--- kernel-power-2.6.28.orig/include/linux/sched.h
+++ kernel-power-2.6.28/include/linux/sched.h
@@ -1665,11 +1665,11 @@
 static inline void wake_up_idle_cpu(int cpu) { }
 #endif
 
+extern unsigned int sysctl_sched_child_runs_first;
 #ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
--- kernel-power-2.6.28.orig/include/linux/swap.h
+++ kernel-power-2.6.28/include/linux/swap.h
@@ -130,6 +130,17 @@
 #define SWAP_MAP_MAX   0x7fff
 #define SWAP_MAP_BAD   0x8000
 
+#define SWAP_GAP_TREE_SIZE 10
+#define SWAP_GAP_RESCAN_TIMEO_MSEC 2000
+#define swap_gap_len(gap) ((gap)->end - (gap)->next)
+#define swap_gap_rb_entry(node) rb_entry(node, struct swap_gap_node, rb_node)
+/* Struct to store gaps info */
+struct swap_gap_node {
+       struct rb_node rb_node;
+       unsigned int next;
+       unsigned int end;
+};
+
 /*
  * The in-memory structure used to track swap areas.
  */
@@ -157,6 +168,9 @@
        unsigned int gap_next;
        unsigned int gap_end;
        unsigned int gaps_exist;
+       struct rb_root gaps_tree;
+       struct swap_gap_node *gap_pool_arr;
+       unsigned long gap_last_scan;
        unsigned int lowest_bit;
        unsigned int highest_bit;
        unsigned int cluster_next;
--- kernel-power-2.6.28.orig/include/net/bluetooth/sco.h
+++ kernel-power-2.6.28/include/net/bluetooth/sco.h
@@ -29,7 +29,7 @@
 #define SCO_DEFAULT_MTU                500
 #define SCO_DEFAULT_FLUSH_TO   0xFFFF
 
-#define SCO_CONN_TIMEOUT       (HZ * 40)
+#define SCO_CONN_TIMEOUT       (HZ * 25)
 #define SCO_DISCONN_TIMEOUT    (HZ * 2)
 #define SCO_CONN_IDLE_TIMEOUT  (HZ * 60)
 
--- kernel-power-2.6.28.orig/kernel/sched_fair.c
+++ kernel-power-2.6.28/kernel/sched_fair.c
@@ -48,10 +48,10 @@
 static unsigned int sched_nr_latency = 5;
 
 /*
- * After fork, child runs first. (default) If set to 0 then
+ * After fork, child runs first. If set to 0 then
  * parent will (try to) run first.
  */
-const_debug unsigned int sysctl_sched_child_runs_first = 1;
+unsigned int sysctl_sched_child_runs_first __read_mostly;
 
 /*
  * sys_sched_yield() compat mode
--- kernel-power-2.6.28.orig/kernel/sysctl.c
+++ kernel-power-2.6.28/kernel/sysctl.c
@@ -235,6 +235,14 @@
 #endif
 
 static struct ctl_table kern_table[] = {
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "sched_child_runs_first",
+               .data           = &sysctl_sched_child_runs_first,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
 #ifdef CONFIG_SCHED_DEBUG
        {
                .ctl_name       = CTL_UNNUMBERED,
@@ -289,14 +297,6 @@
        },
        {
                .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "sched_child_runs_first",
-               .data           = &sysctl_sched_child_runs_first,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec,
-       },
-       {
-               .ctl_name       = CTL_UNNUMBERED,
                .procname       = "sched_features",
                .data           = &sysctl_sched_features,
                .maxlen         = sizeof(unsigned int),
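
With the table entry moved out of the CONFIG_SCHED_DEBUG block and the const_debug initializer dropped, sched_child_runs_first becomes a plain runtime knob that now defaults to 0 (parent runs first). Toggling it from user space:

#include <stdio.h>

/* Let freshly fork()ed children run before their parent again. */
static int enable_child_runs_first(void)
{
        FILE *f = fopen("/proc/sys/kernel/sched_child_runs_first", "w");

        if (!f)
                return -1;
        fputs("1\n", f);
        return fclose(f);
}
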
--- kernel-power-2.6.28.orig/mm/swapfile.c
+++ kernel-power-2.6.28/mm/swapfile.c
@@ -996,11 +996,55 @@
        spin_unlock(&mmlist_lock);
 }
 
+void gaps_rbtree_insert(struct swap_info_struct *sis,
+                       struct swap_gap_node *node)
+{
+       struct rb_node **p = &sis->gaps_tree.rb_node;
+       struct rb_node *parent = NULL;
+       struct swap_gap_node *tmp;
+
+       while (*p) {
+               parent = *p;
+               tmp = rb_entry(parent, struct swap_gap_node, rb_node);
+               if (swap_gap_len(node) < swap_gap_len(tmp))
+                       p = &(*p)->rb_left;
+               else
+                       p = &(*p)->rb_right;
+       }
+       rb_link_node(&node->rb_node, parent, p);
+       rb_insert_color(&node->rb_node, &sis->gaps_tree);
+}
+
+void gaps_rbtree_add(struct swap_info_struct *sis,
+                               unsigned int next, unsigned int end,
+                               struct swap_gap_node **gap_min, int *pos)
+{
+       struct swap_gap_node *gap_node;
+       if (*pos < SWAP_GAP_TREE_SIZE) {
+               gap_node = &sis->gap_pool_arr[*pos];
+               *pos += 1;
+       } else if (swap_gap_len(*gap_min) > end - next) {
+               return;
+       } else {
+               gap_node = *gap_min;
+               rb_erase(&gap_node->rb_node, &sis->gaps_tree);
+               *gap_min = swap_gap_rb_entry(rb_first(&sis->gaps_tree));
+       }
+       gap_node->next = next;
+       gap_node->end = end;
+       if (gap_min && (*gap_min == NULL ||
+                       swap_gap_len(*gap_min) > swap_gap_len(gap_node)))
+               *gap_min = gap_node;
+       gaps_rbtree_insert(sis, gap_node);
+}
+
 /* Find the largest sequence of free pages */
 int find_gap(struct swap_info_struct *sis)
 {
        unsigned i, uninitialized_var(start), uninitialized_var(gap_next);
-       unsigned uninitialized_var(gap_end), gap_size = 0;
+       unsigned uninitialized_var(gap_end);
+       struct swap_gap_node *gap_max, *gap_min = NULL;
+       int pos = 0;
        int in_gap = 0;
 
        spin_unlock(&sis->remap_lock);
@@ -1017,6 +1061,11 @@
                mutex_unlock(&sis->remap_mutex);
                return -1;
        }
+       if (time_after(jiffies, sis->gap_last_scan +
+                       msecs_to_jiffies(SWAP_GAP_RESCAN_TIMEO_MSEC)))
+               sis->gaps_tree = RB_ROOT;
+       if (!RB_EMPTY_ROOT(&sis->gaps_tree))
+               goto out;
       spin_unlock(&sis->remap_lock);
 
        /*
@@ -1028,11 +1077,7 @@
                if (in_gap) {
                        if (!(sis->swap_remap[i] & 0x80000000))
                                continue;
-                       if (i - start > gap_size) {
-                               gap_next = start;
-                               gap_end = i - 1;
-                               gap_size = i - start;
-                       }
+                       gaps_rbtree_add(sis, start, i - 1, &gap_min, &pos);
                        in_gap = 0;
                } else {
                        if (sis->swap_remap[i] & 0x80000000)
@@ -1043,13 +1088,14 @@
                cond_resched();
        }
        spin_lock(&sis->remap_lock);
-       if (in_gap && i - start > gap_size) {
-               sis->gap_next = start;
-               sis->gap_end = i - 1;
-       } else {
-               sis->gap_next = gap_next;
-               sis->gap_end = gap_end;
-       }
+       if (in_gap)
+               gaps_rbtree_add(sis, start, i - 1, &gap_min, &pos);
+       sis->gap_last_scan = jiffies;
+out:
+       gap_max = swap_gap_rb_entry(rb_last(&sis->gaps_tree));
+       rb_erase(&gap_max->rb_node, &sis->gaps_tree);
+       sis->gap_next = gap_max->next;
+       sis->gap_end = gap_max->end;
        mutex_unlock(&sis->remap_mutex);
        return 0;
 }
@@ -1471,6 +1517,7 @@
        p->flags = 0;
        spin_unlock(&swap_lock);
        mutex_unlock(&swapon_mutex);
+       kfree(p->gap_pool_arr);
        vfree(p->swap_remap);
        vfree(swap_map);
        inode = mapping->host;
@@ -1825,6 +1872,14 @@
                goto bad_swap;
        }
 
+       p->gap_pool_arr = kmalloc(sizeof(struct swap_gap_node)*
+                               SWAP_GAP_TREE_SIZE, GFP_KERNEL);
+       if (!p->gap_pool_arr) {
+               error = -ENOMEM;
+               goto bad_swap;
+       }
+       p->gaps_tree = RB_ROOT;
+
        mutex_lock(&swapon_mutex);
        spin_lock(&swap_lock);
        if (swap_flags & SWAP_FLAG_PREFER)
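
Instead of remembering only the single largest gap, find_gap now keeps the SWAP_GAP_TREE_SIZE largest free ranges in an rb-tree ordered by length, consumes the biggest one per call (rb_last), and only rescans the map once the tree is empty or older than SWAP_GAP_RESCAN_TIMEO_MSEC. The pool policy, reduced to a user-space toy with a plain array standing in for the rb-tree:

#define N 4

struct gap { unsigned next, end; };          /* length = end - next */

static void add_gap(struct gap pool[], int *used, unsigned next, unsigned end)
{
        int i, min = 0;

        if (*used < N) {                     /* free slot: just take it */
                pool[(*used)++] = (struct gap){ next, end };
                return;
        }
        for (i = 1; i < N; i++)              /* find the shortest kept gap */
                if (pool[i].end - pool[i].next < pool[min].end - pool[min].next)
                        min = i;
        if (end - next > pool[min].end - pool[min].next)
                pool[min] = (struct gap){ next, end };   /* evict it */
}
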
--- kernel-power-2.6.28.orig/net/bluetooth/hci_conn.c
+++ kernel-power-2.6.28/net/bluetooth/hci_conn.c
@@ -375,6 +375,9 @@
 
        if (acl->state == BT_CONNECTED &&
                        (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
+               acl->power_save = 1;
+               hci_conn_enter_active_mode(acl);
+
                if (lmp_esco_capable(hdev))
                        hci_setup_sync(sco, acl->handle);
                else
--- kernel-power-2.6.28.orig/net/bluetooth/hci_event.c
+++ kernel-power-2.6.28/net/bluetooth/hci_event.c
@@ -1056,6 +1056,8 @@
        if (conn) {
                if (!ev->status)
                        conn->link_mode |= HCI_LM_AUTH;
+               else
+                       conn->sec_level = BT_SECURITY_LOW;
 
                clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
 
@@ -1709,6 +1711,7 @@
                break;
 
        case 0x1c:      /* SCO interval rejected */
+       case 0x1a:      /* Unsupported Remote Feature */
        case 0x1f:      /* Unspecified error */
                if (conn->out && conn->attempt < 2) {
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |