Added Luke-Jr's patches which fix "scheduling while atomic" bugs
author    Pali Rohár <pali.rohar@gmail.com>
Sat, 7 Jan 2012 09:49:30 +0000 (10:49 +0100)
committer Pali Rohár <pali.rohar@gmail.com>
Sat, 7 Jan 2012 09:49:30 +0000 (10:49 +0100)
kernel-power-2.6.28/debian/patches/0001-mtd-fix-a-huge-latency-problem-in-the-MTD-CFI-flash-.diff [new file with mode: 0644]
kernel-power-2.6.28/debian/patches/0002-mtd-change-struct-flchip_shared-spinlock-locking-int.diff [new file with mode: 0644]
kernel-power-2.6.28/debian/patches/ARM_6066_1_Fix_BUG_scheduling_while_atomic_swapper_0_0x00000002.diff [new file with mode: 0644]
kernel-power-2.6.28/debian/patches/USB-g_serial-don-t-set-low_latency-flag.diff [new file with mode: 0644]
kernel-power-2.6.28/debian/patches/bluetooth_scheduling_while_atomic_bug_fix.diff [new file with mode: 0644]
kernel-power-2.6.28/debian/patches/mac80211_fix_allocation_in_mesh_queue_preq.diff [new file with mode: 0644]
kernel-power-2.6.28/debian/patches/series
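
The two MTD patches below convert the CFI chip drivers from spinlock-based to mutex-based locking, so that long flash copies no longer run with preemption disabled. As a rough illustration only (the struct and function names here are hypothetical, not the actual driver code), the before/after locking pattern looks like this:

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>

/* Hypothetical sketch of the locking change applied by the MTD patches:
 * a spinlock held across a long memcpy() disables preemption for the
 * whole copy, while a mutex lets higher-priority threads preempt it. */
struct demo_chip {
	spinlock_t   lock;   /* old scheme: spinlock guarding the chip */
	struct mutex mtx;    /* new scheme: sleepable mutex guarding the chip */
};

/* Old pattern: preemption is off for the entire copy (e.g. 128 KiB at
 * 6.5 MB/s is roughly 20 ms with no scheduling possible). */
static void demo_copy_spinlocked(struct demo_chip *c, void *dst,
				 const void *src, size_t len)
{
	spin_lock(&c->lock);
	memcpy(dst, src, len);
	spin_unlock(&c->lock);
}

/* New pattern: the copying thread may be preempted or sleep while holding
 * the lock, so real-time threads keep running; only other flash users wait. */
static void demo_copy_mutexed(struct demo_chip *c, void *dst,
			      const void *src, size_t len)
{
	mutex_lock(&c->mtx);
	memcpy(dst, src, len);
	mutex_unlock(&c->mtx);
}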

diff --git a/kernel-power-2.6.28/debian/patches/0001-mtd-fix-a-huge-latency-problem-in-the-MTD-CFI-flash-.diff b/kernel-power-2.6.28/debian/patches/0001-mtd-fix-a-huge-latency-problem-in-the-MTD-CFI-flash-.diff
new file mode 100644 (file)
index 0000000..8225b3d
--- /dev/null
@@ -0,0 +1,1568 @@
+From 5a2ef8426802c6c76df555b83cab4522f5bdf1bc Mon Sep 17 00:00:00 2001
+From: Stefani Seibold <stefani@seibold.net>
+Date: Sun, 18 Apr 2010 22:46:44 +0200
+Subject: [PATCH 1/2] mtd: fix a huge latency problem in the MTD CFI flash driver.
+
+The use of a memcpy() while holding a spinlock will cause very long
+thread context switch delays if the flash chip bandwidth is low and the
+data to be copied is large, because a spinlock disables preemption.
+
+For example: a flash chip with 6.5 MB/s bandwidth will, under ubifs (which
+sometimes requests 128 KiB, the flash erase size), cause a preemption delay of
+20 milliseconds. High-priority threads will not be served during this time,
+regardless of whether those threads access the flash or not. This behavior
+breaks real-time guarantees.
+
+The patch changes all uses of spin_lock operations on xxxx->mutex
+into mutex operations, which is exactly what the name says and means.
+
+I have checked the code of the drivers and there is no use of atomic
+paths like interrupts or timers. The mtdoops facility is also not used
+by these drivers. So it is safe to replace the spin_lock with a mutex.
+
+There is no performance regression since the mutex is normally not
+acquired.
+
+Changelog:
+ 06.03.2010 First release
+ 26.03.2010 Fix mutex[1] issue and tested it for compile failure
+
+Signed-off-by: Stefani Seibold <stefani@seibold.net>
+Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+---
+ drivers/mtd/chips/cfi_cmdset_0001.c |  131 +++++++++++++++++-----------------
+ drivers/mtd/chips/cfi_cmdset_0002.c |  124 ++++++++++++++++----------------
+ drivers/mtd/chips/cfi_cmdset_0020.c |  136 +++++++++++++++++-----------------
+ drivers/mtd/chips/fwh_lock.h        |    6 +-
+ drivers/mtd/chips/gen_probe.c       |    3 +-
+ include/linux/mtd/flashchip.h       |    4 +-
+ 6 files changed, 201 insertions(+), 203 deletions(-)
+
+diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
+index c93a8be..c9c3517 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0001.c
++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
+@@ -695,8 +695,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
+                               /* those should be reset too since
+                                  they create memory references. */
+                               init_waitqueue_head(&chip->wq);
+-                              spin_lock_init(&chip->_spinlock);
+-                              chip->mutex = &chip->_spinlock;
++                              mutex_init(&chip->mutex);
+                               chip++;
+                       }
+               }
+@@ -742,9 +741,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
+                       if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
+                               break;
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       cfi_udelay(1);
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       /* Someone else might have been playing with it. */
+                       return -EAGAIN;
+               }
+@@ -791,9 +790,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
+                               return -EIO;
+                       }
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       cfi_udelay(1);
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
+                          So we can just loop here. */
+               }
+@@ -820,10 +819,10 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
+       sleep:
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               add_wait_queue(&chip->wq, &wait);
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               schedule();
+               remove_wait_queue(&chip->wq, &wait);
+-              spin_lock(chip->mutex);
++              mutex_lock(&chip->mutex);
+               return -EAGAIN;
+       }
+ }
+@@ -869,20 +868,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+                        * it'll happily send us to sleep.  In any case, when
+                        * get_chip returns success we're clear to go ahead.
+                        */
+-                      ret = spin_trylock(contender->mutex);
++                      ret = mutex_trylock(&contender->mutex);
+                       spin_unlock(&shared->lock);
+                       if (!ret)
+                               goto retry;
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       ret = chip_ready(map, contender, contender->start, mode);
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       if (ret == -EAGAIN) {
+-                              spin_unlock(contender->mutex);
++                              mutex_unlock(&contender->mutex);
+                               goto retry;
+                       }
+                       if (ret) {
+-                              spin_unlock(contender->mutex);
++                              mutex_unlock(&contender->mutex);
+                               return ret;
+                       }
+                       spin_lock(&shared->lock);
+@@ -891,10 +890,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+                        * in FL_SYNCING state. Put contender and retry. */
+                       if (chip->state == FL_SYNCING) {
+                               put_chip(map, contender, contender->start);
+-                              spin_unlock(contender->mutex);
++                              mutex_unlock(&contender->mutex);
+                               goto retry;
+                       }
+-                      spin_unlock(contender->mutex);
++                      mutex_unlock(&contender->mutex);
+               }
+               /* Check if we already have suspended erase
+@@ -904,10 +903,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+                       spin_unlock(&shared->lock);
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&chip->wq, &wait);
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       schedule();
+                       remove_wait_queue(&chip->wq, &wait);
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       goto retry;
+               }
+@@ -937,12 +936,12 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
+                       if (shared->writing && shared->writing != chip) {
+                               /* give back ownership to who we loaned it from */
+                               struct flchip *loaner = shared->writing;
+-                              spin_lock(loaner->mutex);
++                              mutex_lock(&loaner->mutex);
+                               spin_unlock(&shared->lock);
+-                              spin_unlock(chip->mutex);
++                              mutex_unlock(&chip->mutex);
+                               put_chip(map, loaner, loaner->start);
+-                              spin_lock(chip->mutex);
+-                              spin_unlock(loaner->mutex);
++                              mutex_lock(&chip->mutex);
++                              mutex_unlock(&loaner->mutex);
+                               wake_up(&chip->wq);
+                               return;
+                       }
+@@ -1112,7 +1111,7 @@ static int __xipram xip_wait_for_operation(
+                       (void) map_read(map, adr);
+                       xip_iprefetch();
+                       local_irq_enable();
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       xip_iprefetch();
+                       cond_resched();
+@@ -1122,15 +1121,15 @@ static int __xipram xip_wait_for_operation(
+                        * a suspended erase state.  If so let's wait
+                        * until it's done.
+                        */
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       while (chip->state != newstate) {
+                               DECLARE_WAITQUEUE(wait, current);
+                               set_current_state(TASK_UNINTERRUPTIBLE);
+                               add_wait_queue(&chip->wq, &wait);
+-                              spin_unlock(chip->mutex);
++                              mutex_unlock(&chip->mutex);
+                               schedule();
+                               remove_wait_queue(&chip->wq, &wait);
+-                              spin_lock(chip->mutex);
++                              mutex_lock(&chip->mutex);
+                       }
+                       /* Disallow XIP again */
+                       local_irq_disable();
+@@ -1186,10 +1185,10 @@ static int inval_cache_and_wait_for_operation(
+       int chip_state = chip->state;
+       unsigned int timeo, sleep_time, reset_timeo;
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       if (inval_len)
+               INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       timeo = chip_op_time_max;
+       if (!timeo)
+@@ -1209,7 +1208,7 @@ static int inval_cache_and_wait_for_operation(
+               }
+               /* OK Still waiting. Drop the lock, wait a while and retry. */
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               if (sleep_time >= 1000000/HZ) {
+                       /*
+                        * Half of the normal delay still remaining
+@@ -1224,17 +1223,17 @@ static int inval_cache_and_wait_for_operation(
+                       cond_resched();
+                       timeo--;
+               }
+-              spin_lock(chip->mutex);
++              mutex_lock(&chip->mutex);
+               while (chip->state != chip_state) {
+                       /* Someone's suspended the operation: sleep */
+                       DECLARE_WAITQUEUE(wait, current);
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&chip->wq, &wait);
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       schedule();
+                       remove_wait_queue(&chip->wq, &wait);
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+               }
+               if (chip->erase_suspended || chip->write_suspended)  {
+                       /* Suspend has occured while sleep: reset timeout */
+@@ -1266,7 +1265,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
+       /* Ensure cmd read/writes are aligned. */
+       cmd_addr = adr & ~(map_bankwidth(map)-1);
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, cmd_addr, FL_POINT);
+@@ -1277,7 +1276,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
+               chip->state = FL_POINT;
+               chip->ref_point_counter++;
+       }
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return ret;
+ }
+@@ -1362,7 +1361,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+               else
+                       thislen = len;
+-              spin_lock(chip->mutex);
++              mutex_lock(&chip->mutex);
+               if (chip->state == FL_POINT) {
+                       chip->ref_point_counter--;
+                       if(chip->ref_point_counter == 0)
+@@ -1371,7 +1370,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+                       printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
+               put_chip(map, chip, chip->start);
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               len -= thislen;
+               ofs = 0;
+@@ -1390,10 +1389,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+       /* Ensure cmd read/writes are aligned. */
+       cmd_addr = adr & ~(map_bankwidth(map)-1);
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, cmd_addr, FL_READY);
+       if (ret) {
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               return ret;
+       }
+@@ -1407,7 +1406,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+       put_chip(map, chip, cmd_addr);
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return 0;
+ }
+@@ -1470,10 +1469,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+               return -EINVAL;
+       }
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, adr, mode);
+       if (ret) {
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               return ret;
+       }
+@@ -1519,7 +1518,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+       xip_enable(map, chip, adr);
+  out: put_chip(map, chip, adr);
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return ret;
+ }
+@@ -1628,10 +1627,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+       /* Let's determine this according to the interleave only once */
+       write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, cmd_adr, FL_WRITING);
+       if (ret) {
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               return ret;
+       }
+@@ -1762,7 +1761,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+       xip_enable(map, chip, cmd_adr);
+  out: put_chip(map, chip, cmd_adr);
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return ret;
+ }
+@@ -1841,10 +1840,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+       adr += chip->start;
+  retry:
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, adr, FL_ERASING);
+       if (ret) {
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               return ret;
+       }
+@@ -1900,7 +1899,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+               } else if (chipstatus & 0x20 && retries--) {
+                       printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
+                       put_chip(map, chip, adr);
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       goto retry;
+               } else {
+                       printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
+@@ -1912,7 +1911,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+       xip_enable(map, chip, adr);
+  out: put_chip(map, chip, adr);
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return ret;
+ }
+@@ -1945,7 +1944,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
+       for (i=0; !ret && i<cfi->numchips; i++) {
+               chip = &cfi->chips[i];
+-              spin_lock(chip->mutex);
++              mutex_lock(&chip->mutex);
+               ret = get_chip(map, chip, chip->start, FL_SYNCING);
+               if (!ret) {
+@@ -1956,7 +1955,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
+                        * with the chip now anyway.
+                        */
+               }
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+       }
+       /* Unlock the chips again */
+@@ -1964,14 +1963,14 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
+       for (i--; i >=0; i--) {
+               chip = &cfi->chips[i];
+-              spin_lock(chip->mutex);
++              mutex_lock(&chip->mutex);
+               if (chip->state == FL_SYNCING) {
+                       chip->state = chip->oldstate;
+                       chip->oldstate = FL_READY;
+                       wake_up(&chip->wq);
+               }
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+       }
+ }
+@@ -2017,10 +2016,10 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
+       adr += chip->start;
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, adr, FL_LOCKING);
+       if (ret) {
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               return ret;
+       }
+@@ -2054,7 +2053,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
+       xip_enable(map, chip, adr);
+ out:  put_chip(map, chip, adr);
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return ret;
+ }
+@@ -2119,10 +2118,10 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
+       struct cfi_private *cfi = map->fldrv_priv;
+       int ret;
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
+       if (ret) {
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               return ret;
+       }
+@@ -2141,7 +2140,7 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
+       INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
+       put_chip(map, chip, chip->start);
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return 0;
+ }
+@@ -2416,7 +2415,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
+       for (i=0; !ret && i<cfi->numchips; i++) {
+               chip = &cfi->chips[i];
+-              spin_lock(chip->mutex);
++              mutex_lock(&chip->mutex);
+               switch (chip->state) {
+               case FL_READY:
+@@ -2448,7 +2447,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
+               case FL_PM_SUSPENDED:
+                       break;
+               }
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+       }
+       /* Unlock the chips again */
+@@ -2457,7 +2456,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
+               for (i--; i >=0; i--) {
+                       chip = &cfi->chips[i];
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       if (chip->state == FL_PM_SUSPENDED) {
+                               /* No need to force it into a known state here,
+@@ -2467,7 +2466,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
+                               chip->oldstate = FL_READY;
+                               wake_up(&chip->wq);
+                       }
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+               }
+       }
+@@ -2508,7 +2507,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
+               chip = &cfi->chips[i];
+-              spin_lock(chip->mutex);
++              mutex_lock(&chip->mutex);
+               /* Go to known state. Chip may have been power cycled */
+               if (chip->state == FL_PM_SUSPENDED) {
+@@ -2517,7 +2516,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
+                       wake_up(&chip->wq);
+               }
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+       }
+       if ((mtd->flags & MTD_POWERUP_LOCK)
+@@ -2537,13 +2536,13 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
+               /* force the completion of any ongoing operation
+                  and switch to array mode so any bootloader in
+                  flash is accessible for soft reboot. */
+-              spin_lock(chip->mutex);
++              mutex_lock(&chip->mutex);
+               ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
+               if (!ret) {
+                       map_write(map, CMD(0xff), chip->start);
+                       chip->state = FL_SHUTDOWN;
+               }
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+       }
+       return 0;
+diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
+index d74ec46..090c394 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -563,12 +563,12 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+                       if (time_after(jiffies, timeo)) {
+                               printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
+-                              spin_unlock(chip->mutex);
++                              mutex_unlock(&chip->mutex);
+                               return -EIO;
+                       }
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       cfi_udelay(1);
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       /* Someone else might have been playing with it. */
+                       goto retry;
+               }
+@@ -618,9 +618,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+                               return -EIO;
+                       }
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       cfi_udelay(1);
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
+                          So we can just loop here. */
+               }
+@@ -644,10 +644,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+       sleep:
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               add_wait_queue(&chip->wq, &wait);
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               schedule();
+               remove_wait_queue(&chip->wq, &wait);
+-              spin_lock(chip->mutex);
++              mutex_lock(&chip->mutex);
+               goto resettime;
+       }
+ }
+@@ -779,7 +779,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
+                       (void) map_read(map, adr);
+                       xip_iprefetch();
+                       local_irq_enable();
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       xip_iprefetch();
+                       cond_resched();
+@@ -789,15 +789,15 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
+                        * a suspended erase state.  If so let's wait
+                        * until it's done.
+                        */
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       while (chip->state != FL_XIP_WHILE_ERASING) {
+                               DECLARE_WAITQUEUE(wait, current);
+                               set_current_state(TASK_UNINTERRUPTIBLE);
+                               add_wait_queue(&chip->wq, &wait);
+-                              spin_unlock(chip->mutex);
++                              mutex_unlock(&chip->mutex);
+                               schedule();
+                               remove_wait_queue(&chip->wq, &wait);
+-                              spin_lock(chip->mutex);
++                              mutex_lock(&chip->mutex);
+                       }
+                       /* Disallow XIP again */
+                       local_irq_disable();
+@@ -859,17 +859,17 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
+ #define UDELAY(map, chip, adr, usec)  \
+ do {  \
+-      spin_unlock(chip->mutex);  \
++      mutex_unlock(&chip->mutex);  \
+       cfi_udelay(usec);  \
+-      spin_lock(chip->mutex);  \
++      mutex_lock(&chip->mutex);  \
+ } while (0)
+ #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
+ do {  \
+-      spin_unlock(chip->mutex);  \
++      mutex_unlock(&chip->mutex);  \
+       INVALIDATE_CACHED_RANGE(map, adr, len);  \
+       cfi_udelay(usec);  \
+-      spin_lock(chip->mutex);  \
++      mutex_lock(&chip->mutex);  \
+ } while (0)
+ #endif
+@@ -885,10 +885,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+       /* Ensure cmd read/writes are aligned. */
+       cmd_addr = adr & ~(map_bankwidth(map)-1);
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, cmd_addr, FL_READY);
+       if (ret) {
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               return ret;
+       }
+@@ -901,7 +901,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+       put_chip(map, chip, cmd_addr);
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return 0;
+ }
+@@ -955,7 +955,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
+       struct cfi_private *cfi = map->fldrv_priv;
+  retry:
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       if (chip->state != FL_READY){
+ #if 0
+@@ -964,7 +964,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               add_wait_queue(&chip->wq, &wait);
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               schedule();
+               remove_wait_queue(&chip->wq, &wait);
+@@ -993,7 +993,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
+       cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+       wake_up(&chip->wq);
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return 0;
+ }
+@@ -1062,10 +1062,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+       adr += chip->start;
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, adr, FL_WRITING);
+       if (ret) {
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               return ret;
+       }
+@@ -1108,11 +1108,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&chip->wq, &wait);
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       schedule();
+                       remove_wait_queue(&chip->wq, &wait);
+                       timeo = jiffies + (HZ / 2); /* FIXME */
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       continue;
+               }
+@@ -1144,7 +1144,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+  op_done:
+       chip->state = FL_READY;
+       put_chip(map, chip, adr);
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return ret;
+ }
+@@ -1176,7 +1176,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+               map_word tmp_buf;
+  retry:
+-              spin_lock(cfi->chips[chipnum].mutex);
++              mutex_lock(&cfi->chips[chipnum].mutex);
+               if (cfi->chips[chipnum].state != FL_READY) {
+ #if 0
+@@ -1185,7 +1185,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&cfi->chips[chipnum].wq, &wait);
+-                      spin_unlock(cfi->chips[chipnum].mutex);
++                      mutex_unlock(&cfi->chips[chipnum].mutex);
+                       schedule();
+                       remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
+@@ -1199,7 +1199,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+               /* Load 'tmp_buf' with old contents of flash */
+               tmp_buf = map_read(map, bus_ofs+chipstart);
+-              spin_unlock(cfi->chips[chipnum].mutex);
++              mutex_unlock(&cfi->chips[chipnum].mutex);
+               /* Number of bytes to copy from buffer */
+               n = min_t(int, len, map_bankwidth(map)-i);
+@@ -1254,7 +1254,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+               map_word tmp_buf;
+  retry1:
+-              spin_lock(cfi->chips[chipnum].mutex);
++              mutex_lock(&cfi->chips[chipnum].mutex);
+               if (cfi->chips[chipnum].state != FL_READY) {
+ #if 0
+@@ -1263,7 +1263,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&cfi->chips[chipnum].wq, &wait);
+-                      spin_unlock(cfi->chips[chipnum].mutex);
++                      mutex_unlock(&cfi->chips[chipnum].mutex);
+                       schedule();
+                       remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
+@@ -1276,7 +1276,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+               tmp_buf = map_read(map, ofs + chipstart);
+-              spin_unlock(cfi->chips[chipnum].mutex);
++              mutex_unlock(&cfi->chips[chipnum].mutex);
+               tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
+@@ -1311,10 +1311,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+       adr += chip->start;
+       cmd_adr = adr;
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, adr, FL_WRITING);
+       if (ret) {
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               return ret;
+       }
+@@ -1369,11 +1369,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&chip->wq, &wait);
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       schedule();
+                       remove_wait_queue(&chip->wq, &wait);
+                       timeo = jiffies + (HZ / 2); /* FIXME */
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       continue;
+               }
+@@ -1401,7 +1401,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+  op_done:
+       chip->state = FL_READY;
+       put_chip(map, chip, adr);
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return ret;
+ }
+@@ -1501,10 +1501,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
+       adr = cfi->addr_unlock1;
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, adr, FL_WRITING);
+       if (ret) {
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               return ret;
+       }
+@@ -1537,10 +1537,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
+                       /* Someone's suspended the erase. Sleep */
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&chip->wq, &wait);
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       schedule();
+                       remove_wait_queue(&chip->wq, &wait);
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       continue;
+               }
+               if (chip->erase_suspended) {
+@@ -1574,7 +1574,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
+       chip->state = FL_READY;
+       xip_enable(map, chip, adr);
+       put_chip(map, chip, adr);
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return ret;
+ }
+@@ -1589,10 +1589,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+       adr += chip->start;
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, adr, FL_ERASING);
+       if (ret) {
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               return ret;
+       }
+@@ -1625,10 +1625,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+                       /* Someone's suspended the erase. Sleep */
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&chip->wq, &wait);
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       schedule();
+                       remove_wait_queue(&chip->wq, &wait);
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       continue;
+               }
+               if (chip->erase_suspended) {
+@@ -1664,7 +1664,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+       chip->state = FL_READY;
+       put_chip(map, chip, adr);
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return ret;
+ }
+@@ -1716,7 +1716,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
+       struct cfi_private *cfi = map->fldrv_priv;
+       int ret;
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+       if (ret)
+               goto out_unlock;
+@@ -1742,7 +1742,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
+       ret = 0;
+ out_unlock:
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return ret;
+ }
+@@ -1752,7 +1752,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
+       struct cfi_private *cfi = map->fldrv_priv;
+       int ret;
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
+       if (ret)
+               goto out_unlock;
+@@ -1770,7 +1770,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
+       ret = 0;
+ out_unlock:
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return ret;
+ }
+@@ -1798,7 +1798,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
+               chip = &cfi->chips[i];
+       retry:
+-              spin_lock(chip->mutex);
++              mutex_lock(&chip->mutex);
+               switch(chip->state) {
+               case FL_READY:
+@@ -1812,7 +1812,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
+                        * with the chip now anyway.
+                        */
+               case FL_SYNCING:
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       break;
+               default:
+@@ -1820,7 +1820,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&chip->wq, &wait);
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       schedule();
+@@ -1835,13 +1835,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
+       for (i--; i >=0; i--) {
+               chip = &cfi->chips[i];
+-              spin_lock(chip->mutex);
++              mutex_lock(&chip->mutex);
+               if (chip->state == FL_SYNCING) {
+                       chip->state = chip->oldstate;
+                       wake_up(&chip->wq);
+               }
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+       }
+ }
+@@ -1857,7 +1857,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
+       for (i=0; !ret && i<cfi->numchips; i++) {
+               chip = &cfi->chips[i];
+-              spin_lock(chip->mutex);
++              mutex_lock(&chip->mutex);
+               switch(chip->state) {
+               case FL_READY:
+@@ -1877,7 +1877,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
+                       ret = -EAGAIN;
+                       break;
+               }
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+       }
+       /* Unlock the chips again */
+@@ -1886,13 +1886,13 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
+               for (i--; i >=0; i--) {
+                       chip = &cfi->chips[i];
+-                      spin_lock(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       if (chip->state == FL_PM_SUSPENDED) {
+                               chip->state = chip->oldstate;
+                               wake_up(&chip->wq);
+                       }
+-                      spin_unlock(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+               }
+       }
+@@ -1911,7 +1911,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
+               chip = &cfi->chips[i];
+-              spin_lock(chip->mutex);
++              mutex_lock(&chip->mutex);
+               if (chip->state == FL_PM_SUSPENDED) {
+                       chip->state = FL_READY;
+@@ -1921,7 +1921,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
+               else
+                       printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+       }
+ }
+diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
+index d4714dd..0322c64 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0020.c
++++ b/drivers/mtd/chips/cfi_cmdset_0020.c
+@@ -265,7 +265,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+       timeo = jiffies + HZ;
+  retry:
+-      spin_lock_bh(chip->mutex);
++      mutex_lock(&chip->mutex);
+       /* Check that the chip's ready to talk to us.
+        * If it's in FL_ERASING state, suspend it and make it talk now.
+@@ -296,15 +296,15 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+                               /* make sure we're in 'read status' mode */
+                               map_write(map, CMD(0x70), cmd_addr);
+                               chip->state = FL_ERASING;
+-                              spin_unlock_bh(chip->mutex);
++                              mutex_unlock(&chip->mutex);
+                               printk(KERN_ERR "Chip not ready after erase "
+                                      "suspended: status = 0x%lx\n", status.x[0]);
+                               return -EIO;
+                       }
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       cfi_udelay(1);
+-                      spin_lock_bh(chip->mutex);
++                      mutex_lock(&chip->mutex);
+               }
+               suspended = 1;
+@@ -335,13 +335,13 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+               /* Urgh. Chip not yet ready to talk to us. */
+               if (time_after(jiffies, timeo)) {
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
+                       return -EIO;
+               }
+               /* Latency issues. Drop the lock, wait a while and retry */
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               cfi_udelay(1);
+               goto retry;
+@@ -351,7 +351,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+                  someone changes the status */
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               add_wait_queue(&chip->wq, &wait);
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               schedule();
+               remove_wait_queue(&chip->wq, &wait);
+               timeo = jiffies + HZ;
+@@ -376,7 +376,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+       }
+       wake_up(&chip->wq);
+-      spin_unlock_bh(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return 0;
+ }
+@@ -445,7 +445,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+ #ifdef DEBUG_CFI_FEATURES
+        printk("%s: chip->state[%d]\n", __func__, chip->state);
+ #endif
+-      spin_lock_bh(chip->mutex);
++      mutex_lock(&chip->mutex);
+       /* Check that the chip's ready to talk to us.
+        * Later, we can actually think about interrupting it
+@@ -470,14 +470,14 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+                       break;
+               /* Urgh. Chip not yet ready to talk to us. */
+               if (time_after(jiffies, timeo)) {
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                         printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
+                                status.x[0], map_read(map, cmd_adr).x[0]);
+                       return -EIO;
+               }
+               /* Latency issues. Drop the lock, wait a while and retry */
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               cfi_udelay(1);
+               goto retry;
+@@ -486,7 +486,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+                  someone changes the status */
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               add_wait_queue(&chip->wq, &wait);
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               schedule();
+               remove_wait_queue(&chip->wq, &wait);
+               timeo = jiffies + HZ;
+@@ -503,16 +503,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+               if (map_word_andequal(map, status, status_OK, status_OK))
+                       break;
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               cfi_udelay(1);
+-              spin_lock_bh(chip->mutex);
++              mutex_lock(&chip->mutex);
+               if (++z > 100) {
+                       /* Argh. Not ready for write to buffer */
+                       DISABLE_VPP(map);
+                         map_write(map, CMD(0x70), cmd_adr);
+                       chip->state = FL_STATUS;
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
+                       return -EIO;
+               }
+@@ -532,9 +532,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+       map_write(map, CMD(0xd0), cmd_adr);
+       chip->state = FL_WRITING;
+-      spin_unlock_bh(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       cfi_udelay(chip->buffer_write_time);
+-      spin_lock_bh(chip->mutex);
++      mutex_lock(&chip->mutex);
+       timeo = jiffies + (HZ/2);
+       z = 0;
+@@ -543,11 +543,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+                       /* Someone's suspended the write. Sleep */
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&chip->wq, &wait);
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       schedule();
+                       remove_wait_queue(&chip->wq, &wait);
+                       timeo = jiffies + (HZ / 2); /* FIXME */
+-                      spin_lock_bh(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       continue;
+               }
+@@ -563,16 +563,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+                         map_write(map, CMD(0x70), adr);
+                       chip->state = FL_STATUS;
+                       DISABLE_VPP(map);
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
+                       return -EIO;
+               }
+               /* Latency issues. Drop the lock, wait a while and retry */
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               cfi_udelay(1);
+               z++;
+-              spin_lock_bh(chip->mutex);
++              mutex_lock(&chip->mutex);
+       }
+       if (!z) {
+               chip->buffer_write_time--;
+@@ -596,11 +596,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+               /* put back into read status register mode */
+               map_write(map, CMD(0x70), adr);
+               wake_up(&chip->wq);
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
+       }
+       wake_up(&chip->wq);
+-      spin_unlock_bh(chip->mutex);
++      mutex_unlock(&chip->mutex);
+         return 0;
+ }
+@@ -749,7 +749,7 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
+       timeo = jiffies + HZ;
+ retry:
+-      spin_lock_bh(chip->mutex);
++      mutex_lock(&chip->mutex);
+       /* Check that the chip's ready to talk to us. */
+       switch (chip->state) {
+@@ -766,13 +766,13 @@ retry:
+               /* Urgh. Chip not yet ready to talk to us. */
+               if (time_after(jiffies, timeo)) {
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
+                       return -EIO;
+               }
+               /* Latency issues. Drop the lock, wait a while and retry */
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               cfi_udelay(1);
+               goto retry;
+@@ -781,7 +781,7 @@ retry:
+                  someone changes the status */
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               add_wait_queue(&chip->wq, &wait);
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               schedule();
+               remove_wait_queue(&chip->wq, &wait);
+               timeo = jiffies + HZ;
+@@ -797,9 +797,9 @@ retry:
+       map_write(map, CMD(0xD0), adr);
+       chip->state = FL_ERASING;
+-      spin_unlock_bh(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       msleep(1000);
+-      spin_lock_bh(chip->mutex);
++      mutex_lock(&chip->mutex);
+       /* FIXME. Use a timer to check this, and return immediately. */
+       /* Once the state machine's known to be working I'll do that */
+@@ -810,11 +810,11 @@ retry:
+                       /* Someone's suspended the erase. Sleep */
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&chip->wq, &wait);
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       schedule();
+                       remove_wait_queue(&chip->wq, &wait);
+                       timeo = jiffies + (HZ*20); /* FIXME */
+-                      spin_lock_bh(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       continue;
+               }
+@@ -828,14 +828,14 @@ retry:
+                       chip->state = FL_STATUS;
+                       printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+                       DISABLE_VPP(map);
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       return -EIO;
+               }
+               /* Latency issues. Drop the lock, wait a while and retry */
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               cfi_udelay(1);
+-              spin_lock_bh(chip->mutex);
++              mutex_lock(&chip->mutex);
+       }
+       DISABLE_VPP(map);
+@@ -878,7 +878,7 @@ retry:
+                               printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
+                               timeo = jiffies + HZ;
+                               chip->state = FL_STATUS;
+-                              spin_unlock_bh(chip->mutex);
++                              mutex_unlock(&chip->mutex);
+                               goto retry;
+                       }
+                       printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
+@@ -887,7 +887,7 @@ retry:
+       }
+       wake_up(&chip->wq);
+-      spin_unlock_bh(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return ret;
+ }
+@@ -995,7 +995,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
+               chip = &cfi->chips[i];
+       retry:
+-              spin_lock_bh(chip->mutex);
++              mutex_lock(&chip->mutex);
+               switch(chip->state) {
+               case FL_READY:
+@@ -1009,7 +1009,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
+                        * with the chip now anyway.
+                        */
+               case FL_SYNCING:
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       break;
+               default:
+@@ -1017,7 +1017,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&chip->wq, &wait);
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       schedule();
+                       remove_wait_queue(&chip->wq, &wait);
+@@ -1030,13 +1030,13 @@ static void cfi_staa_sync (struct mtd_info *mtd)
+       for (i--; i >=0; i--) {
+               chip = &cfi->chips[i];
+-              spin_lock_bh(chip->mutex);
++              mutex_lock(&chip->mutex);
+               if (chip->state == FL_SYNCING) {
+                       chip->state = chip->oldstate;
+                       wake_up(&chip->wq);
+               }
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+       }
+ }
+@@ -1054,7 +1054,7 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
+       timeo = jiffies + HZ;
+ retry:
+-      spin_lock_bh(chip->mutex);
++      mutex_lock(&chip->mutex);
+       /* Check that the chip's ready to talk to us. */
+       switch (chip->state) {
+@@ -1071,13 +1071,13 @@ retry:
+               /* Urgh. Chip not yet ready to talk to us. */
+               if (time_after(jiffies, timeo)) {
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
+                       return -EIO;
+               }
+               /* Latency issues. Drop the lock, wait a while and retry */
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               cfi_udelay(1);
+               goto retry;
+@@ -1086,7 +1086,7 @@ retry:
+                  someone changes the status */
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               add_wait_queue(&chip->wq, &wait);
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               schedule();
+               remove_wait_queue(&chip->wq, &wait);
+               timeo = jiffies + HZ;
+@@ -1098,9 +1098,9 @@ retry:
+       map_write(map, CMD(0x01), adr);
+       chip->state = FL_LOCKING;
+-      spin_unlock_bh(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       msleep(1000);
+-      spin_lock_bh(chip->mutex);
++      mutex_lock(&chip->mutex);
+       /* FIXME. Use a timer to check this, and return immediately. */
+       /* Once the state machine's known to be working I'll do that */
+@@ -1118,21 +1118,21 @@ retry:
+                       chip->state = FL_STATUS;
+                       printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+                       DISABLE_VPP(map);
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       return -EIO;
+               }
+               /* Latency issues. Drop the lock, wait a while and retry */
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               cfi_udelay(1);
+-              spin_lock_bh(chip->mutex);
++              mutex_lock(&chip->mutex);
+       }
+       /* Done and happy. */
+       chip->state = FL_STATUS;
+       DISABLE_VPP(map);
+       wake_up(&chip->wq);
+-      spin_unlock_bh(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return 0;
+ }
+ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+@@ -1203,7 +1203,7 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
+       timeo = jiffies + HZ;
+ retry:
+-      spin_lock_bh(chip->mutex);
++      mutex_lock(&chip->mutex);
+       /* Check that the chip's ready to talk to us. */
+       switch (chip->state) {
+@@ -1220,13 +1220,13 @@ retry:
+               /* Urgh. Chip not yet ready to talk to us. */
+               if (time_after(jiffies, timeo)) {
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
+                       return -EIO;
+               }
+               /* Latency issues. Drop the lock, wait a while and retry */
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               cfi_udelay(1);
+               goto retry;
+@@ -1235,7 +1235,7 @@ retry:
+                  someone changes the status */
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               add_wait_queue(&chip->wq, &wait);
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               schedule();
+               remove_wait_queue(&chip->wq, &wait);
+               timeo = jiffies + HZ;
+@@ -1247,9 +1247,9 @@ retry:
+       map_write(map, CMD(0xD0), adr);
+       chip->state = FL_UNLOCKING;
+-      spin_unlock_bh(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       msleep(1000);
+-      spin_lock_bh(chip->mutex);
++      mutex_lock(&chip->mutex);
+       /* FIXME. Use a timer to check this, and return immediately. */
+       /* Once the state machine's known to be working I'll do that */
+@@ -1267,21 +1267,21 @@ retry:
+                       chip->state = FL_STATUS;
+                       printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+                       DISABLE_VPP(map);
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+                       return -EIO;
+               }
+               /* Latency issues. Drop the unlock, wait a while and retry */
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               cfi_udelay(1);
+-              spin_lock_bh(chip->mutex);
++              mutex_lock(&chip->mutex);
+       }
+       /* Done and happy. */
+       chip->state = FL_STATUS;
+       DISABLE_VPP(map);
+       wake_up(&chip->wq);
+-      spin_unlock_bh(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return 0;
+ }
+ static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+@@ -1334,7 +1334,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
+       for (i=0; !ret && i<cfi->numchips; i++) {
+               chip = &cfi->chips[i];
+-              spin_lock_bh(chip->mutex);
++              mutex_lock(&chip->mutex);
+               switch(chip->state) {
+               case FL_READY:
+@@ -1354,7 +1354,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
+                       ret = -EAGAIN;
+                       break;
+               }
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+       }
+       /* Unlock the chips again */
+@@ -1363,7 +1363,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
+               for (i--; i >=0; i--) {
+                       chip = &cfi->chips[i];
+-                      spin_lock_bh(chip->mutex);
++                      mutex_lock(&chip->mutex);
+                       if (chip->state == FL_PM_SUSPENDED) {
+                               /* No need to force it into a known state here,
+@@ -1372,7 +1372,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
+                               chip->state = chip->oldstate;
+                               wake_up(&chip->wq);
+                       }
+-                      spin_unlock_bh(chip->mutex);
++                      mutex_unlock(&chip->mutex);
+               }
+       }
+@@ -1390,7 +1390,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
+               chip = &cfi->chips[i];
+-              spin_lock_bh(chip->mutex);
++              mutex_lock(&chip->mutex);
+               /* Go to known state. Chip may have been power cycled */
+               if (chip->state == FL_PM_SUSPENDED) {
+@@ -1399,7 +1399,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
+                       wake_up(&chip->wq);
+               }
+-              spin_unlock_bh(chip->mutex);
++              mutex_unlock(&chip->mutex);
+       }
+ }
+diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
+index ab44f2b..329906b 100644
+--- a/drivers/mtd/chips/fwh_lock.h
++++ b/drivers/mtd/chips/fwh_lock.h
+@@ -58,10 +58,10 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
+        * to flash memory - that means that we don't have to check status
+        * and timeout.
+        */
+-      spin_lock(chip->mutex);
++      mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, adr, FL_LOCKING);
+       if (ret) {
+-              spin_unlock(chip->mutex);
++              mutex_unlock(&chip->mutex);
+               return ret;
+       }
+@@ -72,7 +72,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
+       /* Done and happy. */
+       chip->state = chip->oldstate;
+       put_chip(map, chip, adr);
+-      spin_unlock(chip->mutex);
++      mutex_unlock(&chip->mutex);
+       return 0;
+ }
+diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
+index e2dc964..fcc1bc0 100644
+--- a/drivers/mtd/chips/gen_probe.c
++++ b/drivers/mtd/chips/gen_probe.c
+@@ -155,8 +155,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
+                       pchip->start = (i << cfi.chipshift);
+                       pchip->state = FL_READY;
+                       init_waitqueue_head(&pchip->wq);
+-                      spin_lock_init(&pchip->_spinlock);
+-                      pchip->mutex = &pchip->_spinlock;
++                      mutex_init(&pchip->mutex);
+               }
+       }
+diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
+index d4f38c5..bfc2f1a 100644
+--- a/include/linux/mtd/flashchip.h
++++ b/include/linux/mtd/flashchip.h
+@@ -15,6 +15,7 @@
+  * has asm/spinlock.h, or 2.4, which has linux/spinlock.h
+  */
+ #include <linux/sched.h>
++#include <linux/mutex.h>
+ typedef enum {
+       FL_READY,
+@@ -65,8 +66,7 @@ struct flchip {
+       unsigned int erase_suspended:1;
+       unsigned long in_progress_block_addr;
+-      spinlock_t *mutex;
+-      spinlock_t _spinlock; /* We do it like this because sometimes they'll be shared. */
++      struct mutex mutex;
+       wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
+                            to be ready */
+       int word_write_time;
+-- 
+1.7.3.4
+
diff --git a/kernel-power-2.6.28/debian/patches/0002-mtd-change-struct-flchip_shared-spinlock-locking-int.diff b/kernel-power-2.6.28/debian/patches/0002-mtd-change-struct-flchip_shared-spinlock-locking-int.diff
new file mode 100644 (file)
index 0000000..11a7224
--- /dev/null
@@ -0,0 +1,125 @@
+From 2ccd3ea4bd9e62a348023bbf98988d01b6b5d7db Mon Sep 17 00:00:00 2001
+From: Stefani Seibold <stefani@seibold.net>
+Date: Thu, 5 Aug 2010 09:19:26 +0200
+Subject: [PATCH 2/2] mtd: change struct flchip_shared spinlock locking into mutex
+
+This patch prevents scheduling while atomic by changing the
+flchip_shared spinlock into a mutex. This should be safe since no atomic
+path will use this lock.
+
+It was suggested by Arnd Bergmann and Vasiliy Kulikov.
+
+Signed-off-by: Stefani Seibold <stefani@seibold.net>
+Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+---
+ drivers/mtd/chips/cfi_cmdset_0001.c |   20 ++++++++++----------
+ include/linux/mtd/flashchip.h       |    2 +-
+ 2 files changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
+index c9c3517..892a862 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0001.c
++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
+@@ -687,7 +687,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
+               chip = &newcfi->chips[0];
+               for (i = 0; i < cfi->numchips; i++) {
+                       shared[i].writing = shared[i].erasing = NULL;
+-                      spin_lock_init(&shared[i].lock);
++                      mutex_init(&shared[i].lock);
+                       for (j = 0; j < numparts; j++) {
+                               *chip = cfi->chips[i];
+                               chip->start += j << partshift;
+@@ -856,7 +856,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+                */
+               struct flchip_shared *shared = chip->priv;
+               struct flchip *contender;
+-              spin_lock(&shared->lock);
++              mutex_lock(&shared->lock);
+               contender = shared->writing;
+               if (contender && contender != chip) {
+                       /*
+@@ -869,7 +869,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+                        * get_chip returns success we're clear to go ahead.
+                        */
+                       ret = mutex_trylock(&contender->mutex);
+-                      spin_unlock(&shared->lock);
++                      mutex_unlock(&shared->lock);
+                       if (!ret)
+                               goto retry;
+                       mutex_unlock(&chip->mutex);
+@@ -884,7 +884,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+                               mutex_unlock(&contender->mutex);
+                               return ret;
+                       }
+-                      spin_lock(&shared->lock);
++                      mutex_lock(&shared->lock);
+                       /* We should not own chip if it is already
+                        * in FL_SYNCING state. Put contender and retry. */
+@@ -900,7 +900,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+                * on this chip. Sleep. */
+               if (mode == FL_ERASING && shared->erasing
+                   && shared->erasing->oldstate == FL_ERASING) {
+-                      spin_unlock(&shared->lock);
++                      mutex_unlock(&shared->lock);
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       add_wait_queue(&chip->wq, &wait);
+                       mutex_unlock(&chip->mutex);
+@@ -914,7 +914,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+               shared->writing = chip;
+               if (mode == FL_ERASING)
+                       shared->erasing = chip;
+-              spin_unlock(&shared->lock);
++              mutex_unlock(&shared->lock);
+       }
+       ret = chip_ready(map, chip, adr, mode);
+       if (ret == -EAGAIN)
+@@ -929,7 +929,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
+       if (chip->priv) {
+               struct flchip_shared *shared = chip->priv;
+-              spin_lock(&shared->lock);
++              mutex_lock(&shared->lock);
+               if (shared->writing == chip && chip->oldstate == FL_READY) {
+                       /* We own the ability to write, but we're done */
+                       shared->writing = shared->erasing;
+@@ -937,7 +937,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
+                               /* give back ownership to who we loaned it from */
+                               struct flchip *loaner = shared->writing;
+                               mutex_lock(&loaner->mutex);
+-                              spin_unlock(&shared->lock);
++                              mutex_unlock(&shared->lock);
+                               mutex_unlock(&chip->mutex);
+                               put_chip(map, loaner, loaner->start);
+                               mutex_lock(&chip->mutex);
+@@ -955,11 +955,11 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
+                        * Don't let the switch below mess things up since
+                        * we don't have ownership to resume anything.
+                        */
+-                      spin_unlock(&shared->lock);
++                      mutex_unlock(&shared->lock);
+                       wake_up(&chip->wq);
+                       return;
+               }
+-              spin_unlock(&shared->lock);
++              mutex_unlock(&shared->lock);
+       }
+       switch(chip->oldstate) {
+diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
+index bfc2f1a..fa6db00 100644
+--- a/include/linux/mtd/flashchip.h
++++ b/include/linux/mtd/flashchip.h
+@@ -83,7 +83,7 @@ struct flchip {
+ /* This is used to handle contention on write/erase operations
+    between partitions of the same physical chip. */
+ struct flchip_shared {
+-      spinlock_t lock;
++      struct mutex lock;
+       struct flchip *writing;
+       struct flchip *erasing;
+ };
+-- 
+1.7.3.4
+
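
For readers unfamiliar with the pattern, a minimal sketch of what the two MTD patches above converge on (the names below are stand-ins, not the driver's own): the shared per-chip structure carries a struct mutex, so contending writers sleep instead of spinning with preemption disabled, which is only legal because every caller runs in process context.

#include <linux/mutex.h>

struct shared_example {                 /* stand-in for struct flchip_shared */
	struct mutex lock;              /* was: spinlock_t lock */
	void *writing;
	void *erasing;
};

static void shared_example_init(struct shared_example *s)
{
	mutex_init(&s->lock);           /* was: spin_lock_init(&s->lock) */
}

static void shared_example_claim(struct shared_example *s, void *owner)
{
	mutex_lock(&s->lock);           /* may sleep: process context only */
	s->writing = owner;
	mutex_unlock(&s->lock);
}
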
diff --git a/kernel-power-2.6.28/debian/patches/ARM_6066_1_Fix_BUG_scheduling_while_atomic_swapper_0_0x00000002.diff b/kernel-power-2.6.28/debian/patches/ARM_6066_1_Fix_BUG_scheduling_while_atomic_swapper_0_0x00000002.diff
new file mode 100644 (file)
index 0000000..48df839
--- /dev/null
@@ -0,0 +1,48 @@
+commit 13ea9cc82138691856d7cd855dff9aef1479adb9
+Author: Santosh Shilimkar <santosh.shilimkar@ti.com>
+Date:   Fri Apr 30 06:51:20 2010 +0100
+
+    ARM: 6066/1: Fix "BUG: scheduling while atomic: swapper/0/0x00000002
+    
+    This patch fixes the preempt leak in the cpuidle path invoked from
+    cpu-hotplug. The fix is suggested by Russell King and is based
+    on x86 idea of calling init_idle() on the idle task when it's
+    re-used which also resets the preempt count amongst other things
+    
+    dump:
+    BUG: scheduling while atomic: swapper/0/0x00000002
+    Modules linked in:
+    Backtrace:
+    [<c0024f90>] (dump_backtrace+0x0/0x110) from [<c0173bc4>] (dump_stack+0x18/0x1c)
+     r7:c02149e4 r6:c033df00 r5:c7836000 r4:00000000
+    [<c0173bac>] (dump_stack+0x0/0x1c) from [<c003b4f0>] (__schedule_bug+0x60/0x70)
+    [<c003b490>] (__schedule_bug+0x0/0x70) from [<c0174214>] (schedule+0x98/0x7b8)
+     r5:c7836000 r4:c7836000
+    [<c017417c>] (schedule+0x0/0x7b8) from [<c00228c4>] (cpu_idle+0xb4/0xd4)
+    # [<c0022810>] (cpu_idle+0x0/0xd4) from [<c0171dd8>] (secondary_start_kernel+0xe0/0xf0)
+     r5:c7836000 r4:c0205f40
+    [<c0171cf8>] (secondary_start_kernel+0x0/0xf0) from [<c002d57c>] (prm_rmw_mod_reg_bits+0x88/0xa4)
+     r7:c02149e4 r6:00000001 r5:00000001 r4:c7836000
+    Backtrace aborted due to bad frame pointer <c7837fbc>
+    
+    Cc: Catalin Marinas <catalin.marinas@arm.com>
+    Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
+    Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 577543f..a01194e 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -86,6 +86,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
+                       return PTR_ERR(idle);
+               }
+               ci->idle = idle;
++      } else {
++              /*
++               * Since this idle thread is being re-used, call
++               * init_idle() to reinitialize the thread structure.
++               */
++              init_idle(idle, cpu);
+       }
+       /*
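
A hedged sketch of the idea behind the ARM fix above (the cached pointer and helper names are hypothetical; the real change is the small hunk in arch/arm/kernel/smp.c): the idle task is created on the first bring-up, and on every later re-plug the existing task is passed through init_idle(), which resets its preempt count so schedule() no longer sees an atomic context.

#include <linux/err.h>
#include <linux/sched.h>

static struct task_struct *example_idle;        /* hypothetical per-CPU cache */

static int example_cpu_up(unsigned int cpu)
{
	if (!example_idle) {
		example_idle = fork_idle(cpu);  /* first bring-up: create the idle task */
		if (IS_ERR(example_idle))
			return PTR_ERR(example_idle);
	} else {
		init_idle(example_idle, cpu);   /* re-use: resets preempt count et al. */
	}
	return 0;
}
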
diff --git a/kernel-power-2.6.28/debian/patches/USB-g_serial-don-t-set-low_latency-flag.diff b/kernel-power-2.6.28/debian/patches/USB-g_serial-don-t-set-low_latency-flag.diff
new file mode 100644 (file)
index 0000000..7e6db7a
--- /dev/null
@@ -0,0 +1,59 @@
+From a325a46edc23a9c267ac060299f3ba9955c1ca57 Mon Sep 17 00:00:00 2001
+From: Jon Povey <jon.povey@racelogic.co.uk>
+Date: Mon, 14 Jun 2010 19:41:04 +0900
+Subject: [PATCH] USB: g_serial: don't set low_latency flag
+
+No longer set low_latency flag as it causes this warning backtrace:
+
+  WARNING: at kernel/mutex.c:207 __mutex_lock_slowpath+0x6c/0x288()
+
+Fix associated locking and wakeups.
+
+Signed-off-by: Jon Povey <jon.povey@racelogic.co.uk>
+Cc: Maulik Mankad <x0082077@ti.com>
+Cc: stable <stable@kernel.org>
+Acked-by: David Brownell <dbrownell@users.sourceforge.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/usb/gadget/u_serial.c |   15 ++-------------
+ 1 files changed, 2 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
+index 5c83a87..b09c4b2 100644
+--- a/drivers/usb/gadget/u_serial.c
++++ b/drivers/usb/gadget/u_serial.c
+@@ -550,17 +550,11 @@ recycle:
+               list_move(&req->list, &port->read_pool);
+       }
+-      /* Push from tty to ldisc; this is immediate with low_latency, and
+-       * may trigger callbacks to this driver ... so drop the spinlock.
++      /* Push from tty to ldisc; without low_latency set this is handled by
++       * a workqueue, so we won't get callbacks and can hold port_lock
+        */
+       if (tty && do_push) {
+-              spin_unlock_irq(&port->port_lock);
+               tty_flip_buffer_push(tty);
+-              wake_up_interruptible(&tty->read_wait);
+-              spin_lock_irq(&port->port_lock);
+-
+-              /* tty may have been closed */
+-              tty = port->port_tty;
+       }
+@@ -798,11 +792,6 @@ static int gs_open(struct tty_struct *tty, struct file *file)
+       port->open_count = 1;
+       port->openclose = false;
+-      /* low_latency means ldiscs work in tasklet context, without
+-       * needing a workqueue schedule ... easier to keep up.
+-       */
+-      tty->low_latency = 1;
+-
+       /* if connected, start the I/O stream */
+       if (port->port_usb) {
+               struct gserial  *gser = port->port_usb;
+-- 
+1.7.3.4
+
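
Illustrative fragment only (not part of the patch): the reason clearing low_latency suffices is that with tty->low_latency left at 0, tty_flip_buffer_push() merely schedules flush_to_ldisc() on a workqueue, so it may be called while holding a spinlock; with low_latency set it calls into the line discipline synchronously, which can take a mutex and trigger the warning quoted in the changelog.

#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

static void push_rx_example(struct tty_struct *tty, spinlock_t *lock)
{
	spin_lock_irq(lock);
	/* ... move received data into the tty flip buffer ... */
	tty_flip_buffer_push(tty);      /* deferred flush: no ldisc callbacks here */
	spin_unlock_irq(lock);
}
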
diff --git a/kernel-power-2.6.28/debian/patches/bluetooth_scheduling_while_atomic_bug_fix.diff b/kernel-power-2.6.28/debian/patches/bluetooth_scheduling_while_atomic_bug_fix.diff
new file mode 100644 (file)
index 0000000..5fcee18
--- /dev/null
@@ -0,0 +1,64 @@
+commit f74c77cb1124a11acf69c98d10c0fdc22f322664
+Author: Dave Young <hidave.darkstar@gmail.com>
+Date:   Sun Oct 18 20:24:41 2009 +0000
+
+    bluetooth: scheduling while atomic bug fix
+    
+    Due to driver core changes, dev_set_drvdata will call kzalloc, which must
+    happen in a might_sleep context, but hci_conn_add is called in atomic context.
+    
+    Like dev_set_name, move dev_set_drvdata to the work queue function.
+    
+    oops as following:
+    
+    Oct  2 17:41:59 darkstar kernel: [  438.001341] BUG: sleeping function called from invalid context at mm/slqb.c:1546
+    Oct  2 17:41:59 darkstar kernel: [  438.001345] in_atomic(): 1, irqs_disabled(): 0, pid: 2133, name: sdptool
+    Oct  2 17:41:59 darkstar kernel: [  438.001348] 2 locks held by sdptool/2133:
+    Oct  2 17:41:59 darkstar kernel: [  438.001350]  #0:  (sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP){+.+.+.}, at: [<faa1d2f5>] lock_sock+0xa/0xc [l2cap]
+    Oct  2 17:41:59 darkstar kernel: [  438.001360]  #1:  (&hdev->lock){+.-.+.}, at: [<faa20e16>] l2cap_sock_connect+0x103/0x26b [l2cap]
+    Oct  2 17:41:59 darkstar kernel: [  438.001371] Pid: 2133, comm: sdptool Not tainted 2.6.31-mm1 #2
+    Oct  2 17:41:59 darkstar kernel: [  438.001373] Call Trace:
+    Oct  2 17:41:59 darkstar kernel: [  438.001381]  [<c022433f>] __might_sleep+0xde/0xe5
+    Oct  2 17:41:59 darkstar kernel: [  438.001386]  [<c0298843>] __kmalloc+0x4a/0x15a
+    Oct  2 17:41:59 darkstar kernel: [  438.001392]  [<c03f0065>] ? kzalloc+0xb/0xd
+    Oct  2 17:41:59 darkstar kernel: [  438.001396]  [<c03f0065>] kzalloc+0xb/0xd
+    Oct  2 17:41:59 darkstar kernel: [  438.001400]  [<c03f04ff>] device_private_init+0x15/0x3d
+    Oct  2 17:41:59 darkstar kernel: [  438.001405]  [<c03f24c5>] dev_set_drvdata+0x18/0x26
+    Oct  2 17:41:59 darkstar kernel: [  438.001414]  [<fa51fff7>] hci_conn_init_sysfs+0x40/0xd9 [bluetooth]
+    Oct  2 17:41:59 darkstar kernel: [  438.001422]  [<fa51cdc0>] ? hci_conn_add+0x128/0x186 [bluetooth]
+    Oct  2 17:41:59 darkstar kernel: [  438.001429]  [<fa51ce0f>] hci_conn_add+0x177/0x186 [bluetooth]
+    Oct  2 17:41:59 darkstar kernel: [  438.001437]  [<fa51cf8a>] hci_connect+0x3c/0xfb [bluetooth]
+    Oct  2 17:41:59 darkstar kernel: [  438.001442]  [<faa20e87>] l2cap_sock_connect+0x174/0x26b [l2cap]
+    Oct  2 17:41:59 darkstar kernel: [  438.001448]  [<c04c8df5>] sys_connect+0x60/0x7a
+    Oct  2 17:41:59 darkstar kernel: [  438.001453]  [<c024b703>] ? lock_release_non_nested+0x84/0x1de
+    Oct  2 17:41:59 darkstar kernel: [  438.001458]  [<c028804b>] ? might_fault+0x47/0x81
+    Oct  2 17:41:59 darkstar kernel: [  438.001462]  [<c028804b>] ? might_fault+0x47/0x81
+    Oct  2 17:41:59 darkstar kernel: [  438.001468]  [<c033361f>] ? __copy_from_user_ll+0x11/0xce
+    Oct  2 17:41:59 darkstar kernel: [  438.001472]  [<c04c9419>] sys_socketcall+0x82/0x17b
+    Oct  2 17:41:59 darkstar kernel: [  438.001477]  [<c020329d>] syscall_call+0x7/0xb
+    
+    Signed-off-by: Dave Young <hidave.darkstar@gmail.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
+index 7f939ce..2bc6f6a 100644
+--- a/net/bluetooth/hci_sysfs.c
++++ b/net/bluetooth/hci_sysfs.c
+@@ -92,6 +92,8 @@ static void add_conn(struct work_struct *work)
+       dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
++      dev_set_drvdata(&conn->dev, conn);
++
+       if (device_add(&conn->dev) < 0) {
+               BT_ERR("Failed to register connection device");
+               return;
+@@ -144,8 +146,6 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
+       conn->dev.class = bt_class;
+       conn->dev.parent = &hdev->dev;
+-      dev_set_drvdata(&conn->dev, conn);
+-
+       device_initialize(&conn->dev);
+       INIT_WORK(&conn->work_add, add_conn);
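
A minimal sketch of the general pattern this fix applies (structure and function names below are hypothetical stand-ins for the hci_conn code): everything that may sleep, such as dev_set_name() and dev_set_drvdata() (which can allocate with GFP_KERNEL), runs in a work item, while the atomic caller only queues that work.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct conn_example {                   /* stand-in for struct hci_conn */
	struct device dev;
	struct work_struct work_add;
	u16 handle;
};

/* Runs in process context, so sleeping driver-core calls are allowed. */
static void conn_example_add_work(struct work_struct *work)
{
	struct conn_example *conn =
		container_of(work, struct conn_example, work_add);

	dev_set_name(&conn->dev, "example:%d", conn->handle);
	dev_set_drvdata(&conn->dev, conn);
	if (device_add(&conn->dev) < 0)
		printk(KERN_ERR "conn_example: device_add failed\n");
}

/* May be called from atomic context: it only queues the work. */
static void conn_example_added(struct conn_example *conn)
{
	device_initialize(&conn->dev);  /* does not sleep */
	INIT_WORK(&conn->work_add, conn_example_add_work);
	schedule_work(&conn->work_add);
}
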
diff --git a/kernel-power-2.6.28/debian/patches/mac80211_fix_allocation_in_mesh_queue_preq.diff b/kernel-power-2.6.28/debian/patches/mac80211_fix_allocation_in_mesh_queue_preq.diff
new file mode 100644 (file)
index 0000000..8df4b21
--- /dev/null
@@ -0,0 +1,84 @@
+commit 59615b5f9d1323898ca94e88e595b5b04115076a
+Author: Andrey Yurovsky <andrey@cozybit.com>
+Date:   Thu Jun 25 16:07:42 2009 -0700
+
+    mac80211: fix allocation in mesh_queue_preq
+    
+    We allocate a PREQ queue node in mesh_queue_preq, however the allocation
+    may cause us to sleep.  Use GFP_ATOMIC to prevent this.
+    
+    [ 1869.126498] BUG: scheduling while atomic: ping/1859/0x10000100
+    [ 1869.127164] Modules linked in: ath5k mac80211 ath
+    [ 1869.128310] Pid: 1859, comm: ping Not tainted 2.6.30-wl #1
+    [ 1869.128754] Call Trace:
+    [ 1869.129293]  [<c1023a2b>] __schedule_bug+0x48/0x4d
+    [ 1869.129866]  [<c13b5533>] __schedule+0x77/0x67a
+    [ 1869.130544]  [<c1026f2e>] ? release_console_sem+0x17d/0x185
+    [ 1869.131568]  [<c807cf47>] ? mesh_queue_preq+0x2b/0x165 [mac80211]
+    [ 1869.132318]  [<c13b5b3e>] schedule+0x8/0x1f
+    [ 1869.132807]  [<c1023c12>] __cond_resched+0x16/0x2f
+    [ 1869.133478]  [<c13b5bf0>] _cond_resched+0x27/0x32
+    [ 1869.134191]  [<c108a370>] kmem_cache_alloc+0x1c/0xcf
+    [ 1869.134714]  [<c10273ae>] ? printk+0x15/0x17
+    [ 1869.135670]  [<c807cf47>] mesh_queue_preq+0x2b/0x165 [mac80211]
+    [ 1869.136731]  [<c807d1f8>] mesh_nexthop_lookup+0xee/0x12d [mac80211]
+    [ 1869.138130]  [<c807417e>] ieee80211_xmit+0xe6/0x2b2 [mac80211]
+    [ 1869.138935]  [<c80be46d>] ? ath5k_hw_setup_rx_desc+0x0/0x66 [ath5k]
+    [ 1869.139831]  [<c80c97bc>] ? ath5k_tasklet_rx+0xba/0x506 [ath5k]
+    [ 1869.140863]  [<c8075191>] ieee80211_subif_start_xmit+0x6c9/0x6e4
+    [mac80211]
+    [ 1869.141665]  [<c105cf1c>] ? handle_level_irq+0x78/0x9d
+    [ 1869.142390]  [<c12e3f93>] dev_hard_start_xmit+0x168/0x1c7
+    [ 1869.143092]  [<c12f1f17>] __qdisc_run+0xe1/0x1b7
+    [ 1869.143612]  [<c12e25ff>] qdisc_run+0x18/0x1a
+    [ 1869.144248]  [<c12e62f4>] dev_queue_xmit+0x16a/0x25a
+    [ 1869.144785]  [<c13b6dcc>] ? _read_unlock_bh+0xe/0x10
+    [ 1869.145465]  [<c12eacdb>] neigh_resolve_output+0x19c/0x1c7
+    [ 1869.146182]  [<c130e2da>] ? ip_finish_output+0x0/0x51
+    [ 1869.146697]  [<c130e2a0>] ip_finish_output2+0x182/0x1bc
+    [ 1869.147358]  [<c130e327>] ip_finish_output+0x4d/0x51
+    [ 1869.147863]  [<c130e9d5>] ip_output+0x80/0x85
+    [ 1869.148515]  [<c130cc49>] dst_output+0x9/0xb
+    [ 1869.149141]  [<c130dec6>] ip_local_out+0x17/0x1a
+    [ 1869.149632]  [<c130e0bc>] ip_push_pending_frames+0x1f3/0x255
+    [ 1869.150343]  [<c13247ff>] raw_sendmsg+0x5e6/0x667
+    [ 1869.150883]  [<c1033c55>] ? insert_work+0x6a/0x73
+    [ 1869.151834]  [<c8071e00>] ?
+    ieee80211_invoke_rx_handlers+0x17da/0x1ae8 [mac80211]
+    [ 1869.152630]  [<c132bd68>] inet_sendmsg+0x3b/0x48
+    [ 1869.153232]  [<c12d7deb>] __sock_sendmsg+0x45/0x4e
+    [ 1869.153740]  [<c12d8537>] sock_sendmsg+0xb8/0xce
+    [ 1869.154519]  [<c80be46d>] ? ath5k_hw_setup_rx_desc+0x0/0x66 [ath5k]
+    [ 1869.155289]  [<c1036b25>] ? autoremove_wake_function+0x0/0x30
+    [ 1869.155859]  [<c115992b>] ? __copy_from_user_ll+0x11/0xce
+    [ 1869.156573]  [<c1159d99>] ? copy_from_user+0x31/0x54
+    [ 1869.157235]  [<c12df646>] ? verify_iovec+0x40/0x6e
+    [ 1869.157778]  [<c12d869a>] sys_sendmsg+0x14d/0x1a5
+    [ 1869.158714]  [<c8072c40>] ? __ieee80211_rx+0x49e/0x4ee [mac80211]
+    [ 1869.159641]  [<c80c83fe>] ? ath5k_rxbuf_setup+0x6d/0x8d [ath5k]
+    [ 1869.160543]  [<c80be46d>] ? ath5k_hw_setup_rx_desc+0x0/0x66 [ath5k]
+    [ 1869.161434]  [<c80beba4>] ? ath5k_hw_get_rxdp+0xe/0x10 [ath5k]
+    [ 1869.162319]  [<c80c97bc>] ? ath5k_tasklet_rx+0xba/0x506 [ath5k]
+    [ 1869.163063]  [<c1005627>] ? enable_8259A_irq+0x40/0x43
+    [ 1869.163594]  [<c101edb8>] ? __dequeue_entity+0x23/0x27
+    [ 1869.164793]  [<c100187a>] ? __switch_to+0x2b/0x105
+    [ 1869.165442]  [<c1021d5f>] ? finish_task_switch+0x5b/0x74
+    [ 1869.166129]  [<c12d963a>] sys_socketcall+0x14b/0x17b
+    [ 1869.166612]  [<c1002b95>] syscall_call+0x7/0xb
+    
+    Signed-off-by: Andrey Yurovsky <andrey@cozybit.com>
+    Signed-off-by: John W. Linville <linville@tuxdriver.com>
+
+diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
+index 003cb47..f49ef28 100644
+--- a/net/mac80211/mesh_hwmp.c
++++ b/net/mac80211/mesh_hwmp.c
+@@ -637,7 +637,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
+       struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+       struct mesh_preq_queue *preq_node;
+-      preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL);
++      preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
+       if (!preq_node) {
+               printk(KERN_DEBUG "Mesh HWMP: could not allocate PREQ node\n");
+               return;
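
A hedged illustration of the rule the one-line fix applies (the names below are placeholders, not mac80211's): in a path that may run atomically, pass GFP_ATOMIC so kmalloc() never sleeps, and be prepared for the allocation to fail more often than a GFP_KERNEL one would.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct preq_example {                   /* stand-in for struct mesh_preq_queue */
	u8 flags;
};

static int queue_preq_example(gfp_t gfp)
{
	struct preq_example *node = kmalloc(sizeof(*node), gfp);

	if (!node)                      /* failure is more likely with GFP_ATOMIC */
		return -ENOMEM;

	node->flags = 0;
	/* ... link node into the PREQ queue under a spinlock ... */
	kfree(node);                    /* placeholder so the sketch stays self-contained */
	return 0;
}

/* tx path / under locks:  queue_preq_example(GFP_ATOMIC);
 * plain process context:  queue_preq_example(GFP_KERNEL); */
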
diff --git a/kernel-power-2.6.28/debian/patches/series b/kernel-power-2.6.28/debian/patches/series
index eb7e6bc..5ed9ea5 100644 (file)
@@ -57,3 +57,9 @@ overclock_smartreflex_900.diff
 dspbridge.diff
 shutdown-reboot-patch.diff
 option_disable_indicator_led.diff
+ARM_6066_1_Fix_BUG_scheduling_while_atomic_swapper_0_0x00000002.diff
+bluetooth_scheduling_while_atomic_bug_fix.diff
+mac80211_fix_allocation_in_mesh_queue_preq.diff
+USB-g_serial-don-t-set-low_latency-flag.diff
+0001-mtd-fix-a-huge-latency-problem-in-the-MTD-CFI-flash-.diff
+0002-mtd-change-struct-flchip_shared-spinlock-locking-int.diff