1 --- kernel-power-2.6.28.orig/arch/arm/include/asm/cacheflush.h
2 +++ kernel-power-2.6.28/arch/arm/include/asm/cacheflush.h
4 * Please note that the implementation of these, and the required
5 * effects are cache-type (VIVT/VIPT/PIPT) specific.
7 - * flush_cache_kern_all()
10 * Unconditionally clean and invalidate the entire cache.
12 - * flush_cache_user_mm(mm)
15 * Clean and invalidate all user space cache entries
16 * before a change of page tables.
18 - * flush_cache_user_range(start, end, flags)
19 + * flush_user_range(start, end, flags)
21 * Clean and invalidate a range of cache entries in the
22 * specified address space before a change of page tables.
24 * - start - virtual start address
25 * - end - virtual end address
27 + * coherent_user_range(start, end)
29 + * Ensure coherency between the Icache and the Dcache in the
30 + * region described by start, end. If you have non-snooping
31 + * Harvard caches, you need to implement this function.
32 + * - start - virtual start address
33 + * - end - virtual end address
35 + * flush_kern_dcache_area(kaddr, size)
37 + * Ensure that the data held in page is written back.
38 + * - kaddr - page address
39 + * - size - region size
45 * Harvard caches are synchronised for the user space address range.
46 * This is used for the ARM private sys_cacheflush system call.
48 -#define flush_cache_user_range(vma,start,end) \
49 +#define flush_cache_user_range(start,end) \
50 __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
53 --- kernel-power-2.6.28.orig/arch/arm/kernel/traps.c
54 +++ kernel-power-2.6.28/arch/arm/kernel/traps.c
56 if (end > vma->vm_end)
59 - flush_cache_user_range(vma, start, end);
60 + up_read(&mm->mmap_sem);
61 + flush_cache_user_range(start, end);
64 up_read(&mm->mmap_sem);
66 --- kernel-power-2.6.28.orig/arch/arm/mach-omap2/smartreflex.c
67 +++ kernel-power-2.6.28/arch/arm/mach-omap2/smartreflex.c
72 - if (sr->is_autocomp_active) {
73 + if (sr->is_autocomp_active && !sr->is_sr_reset) {
74 WARN(1, "SR: Must not transmit VCBYPASS command while SR is "
77 --- kernel-power-2.6.28.orig/arch/arm/mm/fault.c
78 +++ kernel-power-2.6.28/arch/arm/mm/fault.c
81 return do_page_fault(addr, fsr, regs);
83 + if (user_mode(regs))
86 index = pgd_index(addr);
90 { do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
91 { do_bad, SIGKILL, 0, "terminal exception" },
92 { do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
93 +/* Do we need a runtime check? */
94 +#if __LINUX_ARM_ARCH__ < 6
95 { do_bad, SIGBUS, 0, "external abort on linefetch" },
97 + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "I-cache maintenance fault" },
99 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
100 { do_bad, SIGBUS, 0, "external abort on linefetch" },
101 { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
102 --- kernel-power-2.6.28.orig/arch/arm/mm/mmu.c
103 +++ kernel-power-2.6.28/arch/arm/mm/mmu.c
105 pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
106 flush_pmd_entry(pmd);
109 + local_flush_tlb_all();
111 --- kernel-power-2.6.28.orig/arch/arm/mm/proc-v6.S
112 +++ kernel-power-2.6.28/arch/arm/mm/proc-v6.S
114 * to what would be the reset vector.
116 * - loc - location to jump to for soft reset
118 - * It is assumed that:
122 --- kernel-power-2.6.28.orig/arch/arm/mm/proc-v7.S
123 +++ kernel-power-2.6.28/arch/arm/mm/proc-v7.S
125 ENDPROC(cpu_v7_proc_init)
127 ENTRY(cpu_v7_proc_fin)
130 + cpsid if @ disable interrupts
131 + bl v7_flush_kern_cache_all
132 + mrc p15, 0, r0, c1, c0, 0 @ ctrl register
133 + bic r0, r0, #0x1000 @ ...i............
134 + bic r0, r0, #0x0006 @ .............ca.
135 + mcr p15, 0, r0, c1, c0, 0 @ disable caches
137 ENDPROC(cpu_v7_proc_fin)
141 * to what would be the reset vector.
143 * - loc - location to jump to for soft reset
145 - * It is assumed that:
149 --- kernel-power-2.6.28.orig/block/cfq-iosched.c
150 +++ kernel-power-2.6.28/block/cfq-iosched.c
153 struct cfq_rb_root service_tree;
154 unsigned int busy_queues;
156 + * Used to track any pending rt requests so we can pre-empt current
157 + * non-RT cfqq in service when this value is non-zero.
159 + unsigned int busy_rt_queues;
165 unsigned long slice_end;
167 + unsigned int slice_dispatch;
169 /* pending metadata requests */
171 @@ -171,13 +177,12 @@
172 enum cfqq_state_flags {
173 CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
174 CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
175 + CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
176 CFQ_CFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
177 CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
178 - CFQ_CFQQ_FLAG_must_dispatch, /* must dispatch, even if expired */
179 CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
180 CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
181 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
182 - CFQ_CFQQ_FLAG_queue_new, /* queue never been serviced */
183 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
184 CFQ_CFQQ_FLAG_sync, /* synchronous queue */
186 @@ -198,13 +203,12 @@
189 CFQ_CFQQ_FNS(wait_request);
190 +CFQ_CFQQ_FNS(must_dispatch);
191 CFQ_CFQQ_FNS(must_alloc);
192 CFQ_CFQQ_FNS(must_alloc_slice);
193 -CFQ_CFQQ_FNS(must_dispatch);
194 CFQ_CFQQ_FNS(fifo_expire);
195 CFQ_CFQQ_FNS(idle_window);
196 CFQ_CFQQ_FNS(prio_changed);
197 -CFQ_CFQQ_FNS(queue_new);
198 CFQ_CFQQ_FNS(slice_new);
202 BUG_ON(cfq_cfqq_on_rr(cfqq));
203 cfq_mark_cfqq_on_rr(cfqq);
205 + if (cfq_class_rt(cfqq))
206 + cfqd->busy_rt_queues++;
208 cfq_resort_rr_list(cfqd, cfqq);
212 BUG_ON(!cfqd->busy_queues);
214 + if (cfq_class_rt(cfqq))
215 + cfqd->busy_rt_queues--;
219 @@ -765,10 +773,15 @@
221 cfq_log_cfqq(cfqd, cfqq, "set_active");
223 + cfqq->slice_dispatch = 0;
225 + cfq_clear_cfqq_wait_request(cfqq);
226 + cfq_clear_cfqq_must_dispatch(cfqq);
227 cfq_clear_cfqq_must_alloc_slice(cfqq);
228 cfq_clear_cfqq_fifo_expire(cfqq);
229 cfq_mark_cfqq_slice_new(cfqq);
230 - cfq_clear_cfqq_queue_new(cfqq);
232 + del_timer(&cfqd->idle_slice_timer);
235 cfqd->active_queue = cfqq;
237 if (cfq_cfqq_wait_request(cfqq))
238 del_timer(&cfqd->idle_slice_timer);
240 - cfq_clear_cfqq_must_dispatch(cfqq);
241 cfq_clear_cfqq_wait_request(cfqq);
245 (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
248 - cfq_mark_cfqq_must_dispatch(cfqq);
249 cfq_mark_cfqq_wait_request(cfqq);
252 @@ -1001,10 +1012,24 @@
254 * The active queue has run out of time, expire it and select new.
256 - if (cfq_slice_used(cfqq))
257 + if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
261 + * If we have a RT cfqq waiting, then we pre-empt the current non-rt
264 + if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
266 + * We simulate this as cfqq timed out so that it gets to bank
267 + * the remaining of its time slice.
269 + cfq_log_cfqq(cfqd, cfqq, "preempt");
270 + cfq_slice_expired(cfqd, 1);
275 * The active queue has requests and isn't expired, allow it to
278 @@ -1030,59 +1055,6 @@
283 - * Dispatch some requests from cfqq, moving them to the request queue
287 -__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
290 - int dispatched = 0;
292 - BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
295 - struct request *rq;
298 - * follow expired path, else get first next available
300 - rq = cfq_check_fifo(cfqq);
302 - rq = cfqq->next_rq;
305 - * finally, insert request into driver dispatch list
307 - cfq_dispatch_insert(cfqd->queue, rq);
311 - if (!cfqd->active_cic) {
312 - atomic_inc(&RQ_CIC(rq)->ioc->refcount);
313 - cfqd->active_cic = RQ_CIC(rq);
316 - if (RB_EMPTY_ROOT(&cfqq->sort_list))
319 - } while (dispatched < max_dispatch);
322 - * expire an async queue immediately if it has used up its slice. idle
323 - * queue always expire after 1 dispatch round.
325 - if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
326 - dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
327 - cfq_class_idle(cfqq))) {
328 - cfqq->slice_end = jiffies + 1;
329 - cfq_slice_expired(cfqd, 0);
335 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
338 @@ -1116,11 +1088,45 @@
343 + * Dispatch a request from cfqq, moving them to the request queue
346 +static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
348 + struct request *rq;
350 + BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
353 + * follow expired path, else get first next available
355 + rq = cfq_check_fifo(cfqq);
357 + rq = cfqq->next_rq;
360 + * insert request into driver dispatch list
362 + cfq_dispatch_insert(cfqd->queue, rq);
364 + if (!cfqd->active_cic) {
365 + struct cfq_io_context *cic = RQ_CIC(rq);
367 + atomic_inc(&cic->ioc->refcount);
368 + cfqd->active_cic = cic;
373 + * Find the cfqq that we need to service and move a request from that to the
376 static int cfq_dispatch_requests(struct request_queue *q, int force)
378 struct cfq_data *cfqd = q->elevator->elevator_data;
379 struct cfq_queue *cfqq;
381 + unsigned int max_dispatch;
383 if (!cfqd->busy_queues)
385 @@ -1128,33 +1134,63 @@
387 return cfq_forced_dispatch(cfqd);
390 - while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
392 + cfqq = cfq_select_queue(cfqd);
397 + * If this is an async queue and we have sync IO in flight, let it wait
399 + if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
402 + max_dispatch = cfqd->cfq_quantum;
403 + if (cfq_class_idle(cfqq))
406 - max_dispatch = cfqd->cfq_quantum;
408 + * Does this cfqq already have too much IO in flight?
410 + if (cfqq->dispatched >= max_dispatch) {
412 + * idle queue must always only have a single IO in flight
414 if (cfq_class_idle(cfqq))
418 - if (cfqq->dispatched >= max_dispatch) {
419 - if (cfqd->busy_queues > 1)
421 - if (cfqq->dispatched >= 4 * max_dispatch)
425 + * We have other queues, don't allow more IO from this one
427 + if (cfqd->busy_queues > 1)
430 - if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
433 + * we are the only queue, allow up to 4 times of 'quantum'
435 + if (cfqq->dispatched >= 4 * max_dispatch)
439 - cfq_clear_cfqq_must_dispatch(cfqq);
440 - cfq_clear_cfqq_wait_request(cfqq);
441 - del_timer(&cfqd->idle_slice_timer);
443 + * Dispatch a request from this cfqq
445 + cfq_dispatch_request(cfqd, cfqq);
446 + cfqq->slice_dispatch++;
447 + cfq_clear_cfqq_must_dispatch(cfqq);
449 - dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
451 + * expire an async queue immediately if it has used up its slice. idle
452 + * queue always expire after 1 dispatch round.
454 + if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
455 + cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
456 + cfq_class_idle(cfqq))) {
457 + cfqq->slice_end = jiffies + 1;
458 + cfq_slice_expired(cfqd, 0);
461 - cfq_log(cfqd, "dispatched=%d", dispatched);
463 + cfq_log(cfqd, "dispatched a request");
468 @@ -1318,7 +1354,15 @@
471 spin_lock_irqsave(q->queue_lock, flags);
472 - __cfq_exit_single_io_context(cfqd, cic);
475 + * Ensure we get a fresh copy of the ->key to prevent
476 + * race between exiting task and queue
478 + smp_read_barrier_depends();
480 + __cfq_exit_single_io_context(cfqd, cic);
482 spin_unlock_irqrestore(q->queue_lock, flags);
485 @@ -1472,7 +1516,6 @@
488 cfq_mark_cfqq_prio_changed(cfqq);
489 - cfq_mark_cfqq_queue_new(cfqq);
491 cfq_init_prio_data(cfqq, ioc);
493 @@ -1797,6 +1840,12 @@
494 if (rq_is_meta(rq) && !cfqq->meta_pending)
498 + * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
500 + if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
503 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
506 @@ -1853,23 +1902,28 @@
508 if (cfqq == cfqd->active_queue) {
510 - * if we are waiting for a request for this queue, let it rip
511 - * immediately and flag that we must not expire this queue
513 + * Remember that we saw a request from this process, but
514 + * don't start queuing just yet. Otherwise we risk seeing lots
515 + * of tiny requests, because we disrupt the normal plugging
516 + * and merging. If the request is already larger than a single
517 + * page, let it rip immediately. For that case we assume that
518 + * merging is already done.
520 if (cfq_cfqq_wait_request(cfqq)) {
521 + if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE) {
522 + del_timer(&cfqd->idle_slice_timer);
523 + blk_start_queueing(cfqd->queue);
525 cfq_mark_cfqq_must_dispatch(cfqq);
526 - del_timer(&cfqd->idle_slice_timer);
527 - blk_start_queueing(cfqd->queue);
529 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
531 * not the active queue - expire current slice if it is
532 * idle and has expired it's mean thinktime or this new queue
533 - * has some old slice time left and is of higher priority
534 + * has some old slice time left and is of higher priority or
535 + * this new queue is RT and the current one is BE
537 cfq_preempt_queue(cfqd, cfqq);
538 - cfq_mark_cfqq_must_dispatch(cfqq);
539 blk_start_queueing(cfqd->queue);
542 @@ -2129,6 +2183,12 @@
546 + * We saw a request before the queue expired, let it through
548 + if (cfq_cfqq_must_dispatch(cfqq))
554 if (cfq_slice_used(cfqq))
555 @@ -2144,10 +2204,8 @@
557 * not expired and it has a request pending, let it dispatch
559 - if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
560 - cfq_mark_cfqq_must_dispatch(cfqq);
561 + if (!RB_EMPTY_ROOT(&cfqq->sort_list))
566 cfq_slice_expired(cfqd, timed_out);
567 --- kernel-power-2.6.28.orig/drivers/dsp/bridge/rmgr/drv.c
568 +++ kernel-power-2.6.28/drivers/dsp/bridge/rmgr/drv.c
569 @@ -517,11 +517,12 @@
571 pDMMList = pDMMList->next;
572 if (pDMMRes->dmmAllocated) {
573 - status = PROC_UnMap(pDMMRes->hProcessor,
574 - (void *)pDMMRes->ulDSPResAddr, pCtxt);
575 - status = PROC_UnReserveMemory(pDMMRes->hProcessor,
576 - (void *)pDMMRes->ulDSPResAddr);
577 - pDMMRes->dmmAllocated = 0;
578 + /* PROC_UnMap frees pDMMRes */
579 + void *processor = pDMMRes->hProcessor;
580 + void *map_addr = (void*)pDMMRes->ulDSPAddr;
581 + void *rsv_addr = (void*)pDMMRes->ulDSPResAddr;
582 + status = PROC_UnMap(processor, map_addr, pCtxt);
583 + status = PROC_UnReserveMemory(processor, rsv_addr);
587 --- kernel-power-2.6.28.orig/drivers/dsp/bridge/rmgr/proc.c
588 +++ kernel-power-2.6.28/drivers/dsp/bridge/rmgr/proc.c
597 --- kernel-power-2.6.28.orig/drivers/i2c/chips/lis302dl.c
598 +++ kernel-power-2.6.28/drivers/i2c/chips/lis302dl.c
600 # define LIS302_CTRL1_Y (1 << 1)
601 # define LIS302_CTRL1_X (1 << 0)
602 #define LIS302_CTRL_2 0x21
603 +# define LIS302_CTRL2_BOOT (1 << 6)
604 #define LIS302_CTRL_3 0x22
605 # define LIS302_CTRL3_GND 0x00
606 # define LIS302_CTRL3_FF_WU_1 0x01
612 - /* Control High Pass filter selection. not used */
614 + * Boot is used to refresh internal registers
615 + * Control High Pass filter selection. not used
617 + ret = lis302dl_write(c, LIS302_CTRL_2, LIS302_CTRL2_BOOT);
622 * Interrupt CTRL register. One interrupt pin is used for
623 --- kernel-power-2.6.28.orig/drivers/leds/leds-lp5523.c
624 +++ kernel-power-2.6.28/drivers/leds/leds-lp5523.c
626 #include <linux/wait.h>
627 #include <linux/leds.h>
628 #include <linux/leds-lp5523.h>
629 +#include <linux/workqueue.h>
631 #define LP5523_DRIVER_NAME "lp5523"
632 #define LP5523_REG_ENABLE 0x00
636 struct led_classdev cdev;
637 + struct work_struct brightness_work;
643 static void lp5523_work(struct work_struct *work);
644 static irqreturn_t lp5523_irq(int irq, void *_chip);
646 +static void lp5523_led_brightness_work(struct work_struct *work);
649 static int lp5523_write(struct i2c_client *client, u8 reg, u8 value)
652 enum led_brightness brightness)
654 struct lp5523_led *led = cdev_to_led(cdev);
655 + led->brightness = (u8)brightness;
657 + schedule_work(&led->brightness_work);
660 +static void lp5523_led_brightness_work(struct work_struct *work)
662 + struct lp5523_led *led = container_of(work,
665 struct lp5523_chip *chip = led_to_lp5523(led);
666 struct i2c_client *client = chip->client;
671 LP5523_REG_LED_PWM_BASE + led->led_nr,
675 mutex_unlock(&chip->lock);
678 dev_err(&client->dev, "error initializing leds\n");
681 + INIT_WORK(&(chip->leds[i].brightness_work),
682 + lp5523_led_brightness_work);
685 ret = lp5523_register_sysfs(client);
690 - for (i = 0; i < pdata->num_leds; i++)
691 + for (i = 0; i < pdata->num_leds; i++) {
692 led_classdev_unregister(&chip->leds[i].cdev);
693 + cancel_work_sync(&chip->leds[i].brightness_work);
700 lp5523_unregister_sysfs(client);
702 - for (i = 0; i < chip->num_leds; i++)
703 + for (i = 0; i < chip->num_leds; i++) {
704 led_classdev_unregister(&chip->leds[i].cdev);
705 + cancel_work_sync(&chip->leds[i].brightness_work);
710 --- kernel-power-2.6.28.orig/drivers/media/radio/radio-si4713.c
711 +++ kernel-power-2.6.28/drivers/media/radio/radio-si4713.c
713 /* module parameters */
714 static int radio_nr = -1; /* radio device minor (-1 ==> auto assign) */
716 +/* properties lock for write operations */
717 +static int config_locked;
719 +/* saved power levels */
720 +static unsigned int max_pl;
721 +static unsigned int min_pl;
723 +/* structure for pid registration */
726 + struct list_head plist;
729 +#define APP_MAX_NUM 2
731 +static int pid_count;
732 +static LIST_HEAD(pid_list_head);
733 +static struct si4713_device *si4713_dev;
737 * Read and write functions
739 si4713_##prop##_write);
742 + * Config lock property
744 +static ssize_t si4713_lock_write(struct device *dev,
745 + struct device_attribute *attr,
754 + sscanf(buf, "%d", &l);
762 +static ssize_t si4713_lock_read(struct device *dev,
763 + struct device_attribute *attr,
766 + return sprintf(buf, "%d\n", config_locked);
769 +static DEVICE_ATTR(lock, S_IRUGO | S_IWUSR, si4713_lock_read,
770 + si4713_lock_write);
773 * Power level property
775 /* power_level (rw) 88 - 115 or 0 */
787 value > MAX_TONE_OFF_TIME)
789 static struct attribute *attrs[] = {
790 + &dev_attr_lock.attr,
791 &dev_attr_power_level.attr,
792 &dev_attr_antenna_capacitor.attr,
793 &dev_attr_rds_pi.attr,
794 @@ -366,13 +420,118 @@
798 +static int register_pid(pid_t pid)
800 + struct pid_list *pitem;
802 + list_for_each_entry(pitem, &pid_list_head, plist) {
803 + if (pitem->pid == pid)
807 + pitem = kmalloc(sizeof(struct pid_list), GFP_KERNEL);
814 + list_add(&(pitem->plist), &pid_list_head);
820 +static int unregister_pid(pid_t pid)
822 + struct pid_list *pitem, *n;
824 + list_for_each_entry_safe(pitem, n, &pid_list_head, plist) {
825 + if (pitem->pid == pid) {
826 + list_del(&(pitem->plist));
837 +static int si4713_priv_ioctl(struct inode *inode, struct file *file,
838 + unsigned int cmd, unsigned long arg)
843 + if (cmd != LOCK_LOW_POWER && cmd != RELEASE_LOW_POWER)
844 + return video_ioctl2(inode, file, cmd, arg);
846 + pl = si4713_get_power_level(si4713_dev);
853 + if (copy_from_user(&pow, (void __user *)arg, sizeof(pow))) {
858 + if (cmd == LOCK_LOW_POWER) {
860 + if (pid_count == APP_MAX_NUM) {
865 + if (pid_count == 0) {
870 + /* Set max possible power level */
876 + rval = register_pid(current->pid);
881 + /* Lower min power level if asked */
887 + } else { /* RELEASE_LOW_POWER */
888 + rval = unregister_pid(current->pid);
893 + if (pid_count == 0) {
898 + rval = si4713_set_power_level(si4713_dev, pow);
904 * si4713_fops - file operations interface
906 static const struct file_operations si4713_fops = {
907 .owner = THIS_MODULE,
909 - .ioctl = video_ioctl2,
910 + .ioctl = si4713_priv_ioctl,
911 .compat_ioctl = v4l_compat_ioctl32,
918 + /* save to global pointer for it to be accessible from ioctl() call */
924 --- kernel-power-2.6.28.orig/drivers/media/radio/radio-si4713.h
925 +++ kernel-power-2.6.28/drivers/media/radio/radio-si4713.h
927 #define SI4713_I2C_ADDR_BUSEN_HIGH 0x63
928 #define SI4713_I2C_ADDR_BUSEN_LOW 0x11
930 +#define LOCK_LOW_POWER _IOW('v', BASE_VIDIOC_PRIVATE + 0, unsigned int)
931 +#define RELEASE_LOW_POWER _IOW('v', BASE_VIDIOC_PRIVATE + 1, unsigned int)
934 * Platform dependent definition
936 --- kernel-power-2.6.28.orig/drivers/media/video/omap34xxcam.c
937 +++ kernel-power-2.6.28/drivers/media/video/omap34xxcam.c
938 @@ -1833,6 +1833,7 @@
939 struct omap34xxcam_videodev *vdev = fh->vdev;
940 struct device *isp = vdev->cam->isp;
944 if (omap34xxcam_daemon_release(vdev, file))
946 @@ -1844,6 +1845,7 @@
947 omap34xxcam_slave_power_set(vdev, V4L2_POWER_STANDBY,
948 OMAP34XXCAM_SLAVE_POWER_ALL);
949 vdev->streaming = NULL;
953 if (atomic_dec_return(&vdev->users) == 0) {
954 @@ -1853,6 +1855,10 @@
956 mutex_unlock(&vdev->mutex);
959 + omap34xxcam_daemon_req_hw_reconfig(
960 + vdev, OMAP34XXCAM_DAEMON_HW_RECONFIG_STREAMOFF);
963 file->private_data = NULL;
965 --- kernel-power-2.6.28.orig/drivers/mmc/host/omap_hsmmc.c
966 +++ kernel-power-2.6.28/drivers/mmc/host/omap_hsmmc.c
968 /* Timeouts for entering power saving states on inactivity, msec */
969 #define OMAP_MMC_DISABLED_TIMEOUT 100
970 #define OMAP_MMC_SLEEP_TIMEOUT 1000
971 +#define OMAP_MMC_OFF_NOSLP_TIMEOUT 3000
972 #define OMAP_MMC_OFF_TIMEOUT 8000
975 @@ -1249,21 +1250,21 @@
978 * Dynamic power saving handling, FSM:
979 - * ENABLED -> DISABLED -> CARDSLEEP / REGSLEEP -> OFF
980 - * ^___________| | |
981 - * |______________________|______________________|
982 + * ENABLED -> DISABLED -> EXTDISABLED / CARDSLEEP / REGSLEEP -> OFF
983 + * ^___________| | |
984 + * |____________________________________|______________________|
986 - * ENABLED: mmc host is fully functional
987 - * DISABLED: fclk is off
988 - * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep
989 - * REGSLEEP: fclk is off, voltage regulator is asleep
990 - * OFF: fclk is off, voltage regulator is off
991 + * ENABLED: mmc host is fully functional
992 + * (EXT)DISABLED: fclk is off
993 + * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep
994 + * REGSLEEP: fclk is off, voltage regulator is asleep
995 + * OFF: fclk is off, voltage regulator is off
997 * Transition handlers return the timeout for the next state transition
1001 -enum {ENABLED = 0, DISABLED, CARDSLEEP, REGSLEEP, OFF};
1002 +enum {ENABLED = 0, DISABLED, EXTDISABLED, CARDSLEEP, REGSLEEP, OFF};
1004 /* Handler for [ENABLED -> DISABLED] transition */
1005 static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host)
1006 @@ -1300,7 +1301,21 @@
1010 -/* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */
1011 +/* Big SD cards (16 GiB) must not put the voltage
1012 +   regulator to sleep because of the high current
1013 +   consumption that results */
1014 +static int omap_hsmmc_support_sleep(struct mmc_host *mmc)
1016 + if (!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
1017 + ((u64)mmc->card->csd.capacity << mmc->card->csd.read_blkbits) >
1018 + 14ULL * 1024 * 1024 * 1024) {
1025 +/* Handler for [DISABLED -> EXTDISABLED / REGSLEEP / CARDSLEEP] transition */
1026 static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host)
1028 int err, new_state, sleep;
1029 @@ -1319,12 +1334,12 @@
1031 new_state = CARDSLEEP;
1033 - new_state = REGSLEEP;
1034 + new_state = omap_hsmmc_support_sleep(host->mmc) ? REGSLEEP : EXTDISABLED;
1037 sleep = omap_hsmmc_full_sleep(host->mmc->card) &&
1038 (new_state == CARDSLEEP);
1039 - if (mmc_slot(host).set_sleep)
1040 + if (mmc_slot(host).set_sleep && new_state != EXTDISABLED)
1041 mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0,
1043 /* FIXME: turn off bus power and perhaps interrupts too */
1044 @@ -1334,18 +1349,20 @@
1045 mmc_release_host(host->mmc);
1047 dev_dbg(mmc_dev(host->mmc), "DISABLED -> %s\n",
1048 - host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
1049 + host->dpm_state == CARDSLEEP ? "CARDSLEEP" :
1050 + host->dpm_state == REGSLEEP ? "REGSLEEP" : "EXTDISABLED");
1052 if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
1053 mmc_slot(host).card_detect ||
1054 (mmc_slot(host).get_cover_state &&
1055 mmc_slot(host).get_cover_state(host->dev, host->slot_id)))
1056 - return msecs_to_jiffies(OMAP_MMC_OFF_TIMEOUT);
1057 + return msecs_to_jiffies(new_state == EXTDISABLED ?
1058 + OMAP_MMC_OFF_NOSLP_TIMEOUT : OMAP_MMC_OFF_TIMEOUT);
1063 -/* Handler for [REGSLEEP / CARDSLEEP -> OFF] transition */
1064 +/* Handler for [EXTDISABLED / REGSLEEP / CARDSLEEP -> OFF] transition */
1065 static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host)
1067 if (!mmc_try_claim_host(host->mmc))
1068 @@ -1364,7 +1381,8 @@
1069 host->power_mode = MMC_POWER_OFF;
1071 dev_dbg(mmc_dev(host->mmc), "%s -> OFF\n",
1072 - host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
1073 + host->dpm_state == CARDSLEEP ? "CARDSLEEP" :
1074 + host->dpm_state == REGSLEEP ? "REGSLEEP" : "EXTDISABLED");
1076 host->dpm_state = OFF;
1078 @@ -1405,14 +1423,15 @@
1079 omap_hsmmc_context_restore(host);
1080 asleep = omap_hsmmc_full_sleep(host->mmc->card) &&
1081 (host->dpm_state == CARDSLEEP);
1082 - if (mmc_slot(host).set_sleep)
1083 + if (mmc_slot(host).set_sleep && host->dpm_state != EXTDISABLED)
1084 mmc_slot(host).set_sleep(host->dev, host->slot_id, 0,
1086 if (mmc_card_can_sleep(host->mmc))
1087 mmc_card_awake(host->mmc);
1089 dev_dbg(mmc_dev(host->mmc), "%s -> ENABLED\n",
1090 - host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
1091 + host->dpm_state == CARDSLEEP ? "CARDSLEEP" :
1092 + host->dpm_state == REGSLEEP ? "REGSLEEP" : "EXTDISABLED");
1094 if (host->pdata->set_pm_constraints)
1095 host->pdata->set_pm_constraints(host->dev, 1);
1096 @@ -1454,6 +1473,7 @@
1097 switch (host->dpm_state) {
1099 return omap_hsmmc_disabled_to_enabled(host);
1103 return omap_hsmmc_sleep_to_enabled(host);
1104 @@ -1484,6 +1504,7 @@
1107 return omap_hsmmc_disabled_to_sleep(host);
1111 return omap_hsmmc_sleep_to_off(host);
1112 --- kernel-power-2.6.28.orig/drivers/net/wireless/wl12xx/wl1251_acx.c
1113 +++ kernel-power-2.6.28/drivers/net/wireless/wl12xx/wl1251_acx.c
1117 *mactime = tsf_info->current_tsf_lsb |
1118 - (tsf_info->current_tsf_msb << 31);
1119 + ((unsigned long long) tsf_info->current_tsf_msb << 32);
1123 --- kernel-power-2.6.28.orig/drivers/net/wireless/wl12xx/wl1251_cmd.c
1124 +++ kernel-power-2.6.28/drivers/net/wireless/wl12xx/wl1251_cmd.c
1127 wl1251_error("tx %s cmd for channel %d failed",
1128 enable ? "start" : "stop", channel);
1133 wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
1134 --- kernel-power-2.6.28.orig/include/linux/sched.h
1135 +++ kernel-power-2.6.28/include/linux/sched.h
1136 @@ -1665,11 +1665,11 @@
1137 static inline void wake_up_idle_cpu(int cpu) { }
1140 +extern unsigned int sysctl_sched_child_runs_first;
1141 #ifdef CONFIG_SCHED_DEBUG
1142 extern unsigned int sysctl_sched_latency;
1143 extern unsigned int sysctl_sched_min_granularity;
1144 extern unsigned int sysctl_sched_wakeup_granularity;
1145 -extern unsigned int sysctl_sched_child_runs_first;
1146 extern unsigned int sysctl_sched_features;
1147 extern unsigned int sysctl_sched_migration_cost;
1148 extern unsigned int sysctl_sched_nr_migrate;
1149 --- kernel-power-2.6.28.orig/include/linux/swap.h
1150 +++ kernel-power-2.6.28/include/linux/swap.h
1151 @@ -130,6 +130,17 @@
1152 #define SWAP_MAP_MAX 0x7fff
1153 #define SWAP_MAP_BAD 0x8000
1155 +#define SWAP_GAP_TREE_SIZE 10
1156 +#define SWAP_GAP_RESCAN_TIMEO_MSEC 2000
1157 +#define swap_gap_len(gap) ((gap)->end - (gap)->next)
1158 +#define swap_gap_rb_entry(node) rb_entry(node, struct swap_gap_node, rb_node)
1159 +/* Struct to store gaps info */
1160 +struct swap_gap_node {
1161 + struct rb_node rb_node;
1162 + unsigned int next;
1167 * The in-memory structure used to track swap areas.
1170 unsigned int gap_next;
1171 unsigned int gap_end;
1172 unsigned int gaps_exist;
1173 + struct rb_root gaps_tree;
1174 + struct swap_gap_node *gap_pool_arr;
1175 + unsigned long gap_last_scan;
1176 unsigned int lowest_bit;
1177 unsigned int highest_bit;
1178 unsigned int cluster_next;
1179 --- kernel-power-2.6.28.orig/include/net/bluetooth/sco.h
1180 +++ kernel-power-2.6.28/include/net/bluetooth/sco.h
1182 #define SCO_DEFAULT_MTU 500
1183 #define SCO_DEFAULT_FLUSH_TO 0xFFFF
1185 -#define SCO_CONN_TIMEOUT (HZ * 40)
1186 +#define SCO_CONN_TIMEOUT (HZ * 25)
1187 #define SCO_DISCONN_TIMEOUT (HZ * 2)
1188 #define SCO_CONN_IDLE_TIMEOUT (HZ * 60)
1190 --- kernel-power-2.6.28.orig/kernel/sched_fair.c
1191 +++ kernel-power-2.6.28/kernel/sched_fair.c
1193 static unsigned int sched_nr_latency = 5;
1196 - * After fork, child runs first. (default) If set to 0 then
1197 + * After fork, child runs first. If set to 0 then
1198 * parent will (try to) run first.
1200 -const_debug unsigned int sysctl_sched_child_runs_first = 1;
1201 +unsigned int sysctl_sched_child_runs_first __read_mostly;
1204 * sys_sched_yield() compat mode
1205 --- kernel-power-2.6.28.orig/kernel/sysctl.c
1206 +++ kernel-power-2.6.28/kernel/sysctl.c
1207 @@ -235,6 +235,14 @@
1210 static struct ctl_table kern_table[] = {
1212 + .ctl_name = CTL_UNNUMBERED,
1213 + .procname = "sched_child_runs_first",
1214 + .data = &sysctl_sched_child_runs_first,
1215 + .maxlen = sizeof(unsigned int),
1217 + .proc_handler = &proc_dointvec,
1219 #ifdef CONFIG_SCHED_DEBUG
1221 .ctl_name = CTL_UNNUMBERED,
1222 @@ -289,14 +297,6 @@
1225 .ctl_name = CTL_UNNUMBERED,
1226 - .procname = "sched_child_runs_first",
1227 - .data = &sysctl_sched_child_runs_first,
1228 - .maxlen = sizeof(unsigned int),
1230 - .proc_handler = &proc_dointvec,
1233 - .ctl_name = CTL_UNNUMBERED,
1234 .procname = "sched_features",
1235 .data = &sysctl_sched_features,
1236 .maxlen = sizeof(unsigned int),
1237 --- kernel-power-2.6.28.orig/mm/swapfile.c
1238 +++ kernel-power-2.6.28/mm/swapfile.c
1239 @@ -996,11 +996,55 @@
1240 spin_unlock(&mmlist_lock);
1243 +void gaps_rbtree_insert(struct swap_info_struct *sis,
1244 + struct swap_gap_node *node)
1246 + struct rb_node **p = &sis->gaps_tree.rb_node;
1247 + struct rb_node *parent = NULL;
1248 + struct swap_gap_node *tmp;
1252 + tmp = rb_entry(parent, struct swap_gap_node, rb_node);
1253 + if (swap_gap_len(node) < swap_gap_len(tmp))
1254 + p = &(*p)->rb_left;
1256 + p = &(*p)->rb_right;
1258 + rb_link_node(&node->rb_node, parent, p);
1259 + rb_insert_color(&node->rb_node, &sis->gaps_tree);
1262 +void gaps_rbtree_add(struct swap_info_struct *sis,
1263 + unsigned int next, unsigned int end,
1264 + struct swap_gap_node **gap_min, int *pos)
1266 + struct swap_gap_node *gap_node;
1267 + if (*pos < SWAP_GAP_TREE_SIZE) {
1268 + gap_node = &sis->gap_pool_arr[*pos];
1270 + } else if (swap_gap_len(*gap_min) > end - next) {
1273 + gap_node = *gap_min;
1274 + rb_erase(&gap_node->rb_node, &sis->gaps_tree);
1275 + *gap_min = swap_gap_rb_entry(rb_first(&sis->gaps_tree));
1277 + gap_node->next = next;
1278 + gap_node->end = end;
1279 + if (gap_min && (*gap_min == NULL ||
1280 + swap_gap_len(*gap_min) > swap_gap_len(gap_node)))
1281 + *gap_min = gap_node;
1282 + gaps_rbtree_insert(sis, gap_node);
1285 /* Find the largest sequence of free pages */
1286 int find_gap(struct swap_info_struct *sis)
1288 unsigned i, uninitialized_var(start), uninitialized_var(gap_next);
1289 - unsigned uninitialized_var(gap_end), gap_size = 0;
1290 + unsigned uninitialized_var(gap_end);
1291 + struct swap_gap_node *gap_max, *gap_min = NULL;
1295 spin_unlock(&sis->remap_lock);
1296 @@ -1017,6 +1061,11 @@
1297 mutex_unlock(&sis->remap_mutex);
1300 + if (time_after(jiffies, sis->gap_last_scan +
1301 + msecs_to_jiffies(SWAP_GAP_RESCAN_TIMEO_MSEC)))
1302 + sis->gaps_tree = RB_ROOT;
1303 + if (!RB_EMPTY_ROOT(&sis->gaps_tree))
1305 spin_unlock(&sis->remap_lock);
1308 @@ -1028,11 +1077,7 @@
1310 if (!(sis->swap_remap[i] & 0x80000000))
1312 - if (i - start > gap_size) {
1315 - gap_size = i - start;
1317 + gaps_rbtree_add(sis, start, i - 1, &gap_min, &pos);
1320 if (sis->swap_remap[i] & 0x80000000)
1321 @@ -1043,13 +1088,14 @@
1324 spin_lock(&sis->remap_lock);
1325 - if (in_gap && i - start > gap_size) {
1326 - sis->gap_next = start;
1327 - sis->gap_end = i - 1;
1329 - sis->gap_next = gap_next;
1330 - sis->gap_end = gap_end;
1333 + gaps_rbtree_add(sis, start, i - 1, &gap_min, &pos);
1334 + sis->gap_last_scan = jiffies;
1336 + gap_max = swap_gap_rb_entry(rb_last(&sis->gaps_tree));
1337 + rb_erase(&gap_max->rb_node, &sis->gaps_tree);
1338 + sis->gap_next = gap_max->next;
1339 + sis->gap_end = gap_max->end;
1340 mutex_unlock(&sis->remap_mutex);
1343 @@ -1471,6 +1517,7 @@
1345 spin_unlock(&swap_lock);
1346 mutex_unlock(&swapon_mutex);
1347 + kfree(p->gap_pool_arr);
1348 vfree(p->swap_remap);
1350 inode = mapping->host;
1351 @@ -1825,6 +1872,14 @@
1355 + p->gap_pool_arr = kmalloc(sizeof(struct swap_gap_node)*
1356 + SWAP_GAP_TREE_SIZE, GFP_KERNEL);
1357 + if (!p->gap_pool_arr) {
1361 + p->gaps_tree = RB_ROOT;
1363 mutex_lock(&swapon_mutex);
1364 spin_lock(&swap_lock);
1365 if (swap_flags & SWAP_FLAG_PREFER)
1366 --- kernel-power-2.6.28.orig/net/bluetooth/hci_conn.c
1367 +++ kernel-power-2.6.28/net/bluetooth/hci_conn.c
1370 if (acl->state == BT_CONNECTED &&
1371 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1372 + acl->power_save = 1;
1373 + hci_conn_enter_active_mode(acl);
1375 if (lmp_esco_capable(hdev))
1376 hci_setup_sync(sco, acl->handle);
1378 --- kernel-power-2.6.28.orig/net/bluetooth/hci_event.c
1379 +++ kernel-power-2.6.28/net/bluetooth/hci_event.c
1380 @@ -1056,6 +1056,8 @@
1383 conn->link_mode |= HCI_LM_AUTH;
1385 + conn->sec_level = BT_SECURITY_LOW;
1387 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1389 @@ -1709,6 +1711,7 @@
1392 case 0x1c: /* SCO interval rejected */
1393 + case 0x1a: /* Unsupported Remote Feature */
1394 case 0x1f: /* Unspecified error */
1395 if (conn->out && conn->attempt < 2) {
1396 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |