--- /dev/null
+From 5a2ef8426802c6c76df555b83cab4522f5bdf1bc Mon Sep 17 00:00:00 2001
+From: Stefani Seibold <stefani@seibold.net>
+Date: Sun, 18 Apr 2010 22:46:44 +0200
+Subject: [PATCH 1/2] mtd: fix a huge latency problem in the MTD CFI flash driver.
+
+The use of a memcpy() during a spinlock operation will cause very long
+thread context switch delays if the flash chip bandwidth is low and the
+data to be copied large, because a spinlock will disable preemption.
+
+For example: a flash with 6.5 MB/s bandwidth will, under ubifs (which
+sometimes requests 128 KiB, the flash erase size), cause a preemption delay
+of 20 milliseconds. High-priority threads will not be served during this
+time, regardless of whether they access the flash or not. This behavior
+breaks real time.
+
+The patch changes all uses of spin_lock operations on xxxx->mutex
+into mutex operations, which is exactly what the name says and means.
+
+I have checked the code of the drivers and there is no use of atomic
+paths like interrupts or timers. The mtdoops facility will also not be used
+by these drivers. So it is safe to replace the spin_lock with a mutex.
+
+There is no performance regression since the mutex is normally not
+acquired.
+
+Changelog:
+ 06.03.2010 First release
+ 26.03.2010 Fix mutex[1] issue and tested it for compile failure
+
+Signed-off-by: Stefani Seibold <stefani@seibold.net>
+Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+---
+ drivers/mtd/chips/cfi_cmdset_0001.c | 131 +++++++++++++++++-----------------
+ drivers/mtd/chips/cfi_cmdset_0002.c | 124 ++++++++++++++++----------------
+ drivers/mtd/chips/cfi_cmdset_0020.c | 136 +++++++++++++++++-----------------
+ drivers/mtd/chips/fwh_lock.h | 6 +-
+ drivers/mtd/chips/gen_probe.c | 3 +-
+ include/linux/mtd/flashchip.h | 4 +-
+ 6 files changed, 201 insertions(+), 203 deletions(-)
+
+diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
+index c93a8be..c9c3517 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0001.c
++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
+@@ -695,8 +695,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
+ /* those should be reset too since
+ they create memory references. */
+ init_waitqueue_head(&chip->wq);
+- spin_lock_init(&chip->_spinlock);
+- chip->mutex = &chip->_spinlock;
++ mutex_init(&chip->mutex);
+ chip++;
+ }
+ }
+@@ -742,9 +741,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
+ if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
+ break;
+
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ /* Someone else might have been playing with it. */
+ return -EAGAIN;
+ }
+@@ -791,9 +790,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
+ return -EIO;
+ }
+
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
+ So we can just loop here. */
+ }
+@@ -820,10 +819,10 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
+ sleep:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ return -EAGAIN;
+ }
+ }
+@@ -869,20 +868,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+ * it'll happily send us to sleep. In any case, when
+ * get_chip returns success we're clear to go ahead.
+ */
+- ret = spin_trylock(contender->mutex);
++ ret = mutex_trylock(&contender->mutex);
+ spin_unlock(&shared->lock);
+ if (!ret)
+ goto retry;
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ ret = chip_ready(map, contender, contender->start, mode);
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ if (ret == -EAGAIN) {
+- spin_unlock(contender->mutex);
++ mutex_unlock(&contender->mutex);
+ goto retry;
+ }
+ if (ret) {
+- spin_unlock(contender->mutex);
++ mutex_unlock(&contender->mutex);
+ return ret;
+ }
+ spin_lock(&shared->lock);
+@@ -891,10 +890,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+ * in FL_SYNCING state. Put contender and retry. */
+ if (chip->state == FL_SYNCING) {
+ put_chip(map, contender, contender->start);
+- spin_unlock(contender->mutex);
++ mutex_unlock(&contender->mutex);
+ goto retry;
+ }
+- spin_unlock(contender->mutex);
++ mutex_unlock(&contender->mutex);
+ }
+
+ /* Check if we already have suspended erase
+@@ -904,10 +903,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+ spin_unlock(&shared->lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ goto retry;
+ }
+
+@@ -937,12 +936,12 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
+ if (shared->writing && shared->writing != chip) {
+ /* give back ownership to who we loaned it from */
+ struct flchip *loaner = shared->writing;
+- spin_lock(loaner->mutex);
++ mutex_lock(&loaner->mutex);
+ spin_unlock(&shared->lock);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ put_chip(map, loaner, loaner->start);
+- spin_lock(chip->mutex);
+- spin_unlock(loaner->mutex);
++ mutex_lock(&chip->mutex);
++ mutex_unlock(&loaner->mutex);
+ wake_up(&chip->wq);
+ return;
+ }
+@@ -1112,7 +1111,7 @@ static int __xipram xip_wait_for_operation(
+ (void) map_read(map, adr);
+ xip_iprefetch();
+ local_irq_enable();
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ xip_iprefetch();
+ cond_resched();
+
+@@ -1122,15 +1121,15 @@ static int __xipram xip_wait_for_operation(
+ * a suspended erase state. If so let's wait
+ * until it's done.
+ */
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ while (chip->state != newstate) {
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ }
+ /* Disallow XIP again */
+ local_irq_disable();
+@@ -1186,10 +1185,10 @@ static int inval_cache_and_wait_for_operation(
+ int chip_state = chip->state;
+ unsigned int timeo, sleep_time, reset_timeo;
+
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ if (inval_len)
+ INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ timeo = chip_op_time_max;
+ if (!timeo)
+@@ -1209,7 +1208,7 @@ static int inval_cache_and_wait_for_operation(
+ }
+
+ /* OK Still waiting. Drop the lock, wait a while and retry. */
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ if (sleep_time >= 1000000/HZ) {
+ /*
+ * Half of the normal delay still remaining
+@@ -1224,17 +1223,17 @@ static int inval_cache_and_wait_for_operation(
+ cond_resched();
+ timeo--;
+ }
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ while (chip->state != chip_state) {
+ /* Someone's suspended the operation: sleep */
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ }
+ if (chip->erase_suspended || chip->write_suspended) {
+ /* Suspend has occured while sleep: reset timeout */
+@@ -1266,7 +1265,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ ret = get_chip(map, chip, cmd_addr, FL_POINT);
+
+@@ -1277,7 +1276,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
+ chip->state = FL_POINT;
+ chip->ref_point_counter++;
+ }
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+
+ return ret;
+ }
+@@ -1362,7 +1361,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+ else
+ thislen = len;
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ if (chip->state == FL_POINT) {
+ chip->ref_point_counter--;
+ if(chip->ref_point_counter == 0)
+@@ -1371,7 +1370,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+ printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
+
+ put_chip(map, chip, chip->start);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+
+ len -= thislen;
+ ofs = 0;
+@@ -1390,10 +1389,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, cmd_addr, FL_READY);
+ if (ret) {
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -1407,7 +1406,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+
+ put_chip(map, chip, cmd_addr);
+
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return 0;
+ }
+
+@@ -1470,10 +1469,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+ return -EINVAL;
+ }
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, mode);
+ if (ret) {
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -1519,7 +1518,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+
+ xip_enable(map, chip, adr);
+ out: put_chip(map, chip, adr);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -1628,10 +1627,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+ /* Let's determine this according to the interleave only once */
+ write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, cmd_adr, FL_WRITING);
+ if (ret) {
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -1762,7 +1761,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+
+ xip_enable(map, chip, cmd_adr);
+ out: put_chip(map, chip, cmd_adr);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -1841,10 +1840,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+ adr += chip->start;
+
+ retry:
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_ERASING);
+ if (ret) {
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -1900,7 +1899,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+ } else if (chipstatus & 0x20 && retries--) {
+ printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
+ put_chip(map, chip, adr);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ goto retry;
+ } else {
+ printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
+@@ -1912,7 +1911,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+
+ xip_enable(map, chip, adr);
+ out: put_chip(map, chip, adr);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -1945,7 +1944,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_SYNCING);
+
+ if (!ret) {
+@@ -1956,7 +1955,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
+ * with the chip now anyway.
+ */
+ }
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ }
+
+ /* Unlock the chips again */
+@@ -1964,14 +1963,14 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_SYNCING) {
+ chip->state = chip->oldstate;
+ chip->oldstate = FL_READY;
+ wake_up(&chip->wq);
+ }
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ }
+ }
+
+@@ -2017,10 +2016,10 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
+
+ adr += chip->start;
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_LOCKING);
+ if (ret) {
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -2054,7 +2053,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
+
+ xip_enable(map, chip, adr);
+ out: put_chip(map, chip, adr);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -2119,10 +2118,10 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
+ if (ret) {
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -2141,7 +2140,7 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
+ INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
+
+ put_chip(map, chip, chip->start);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return 0;
+ }
+
+@@ -2416,7 +2415,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ switch (chip->state) {
+ case FL_READY:
+@@ -2448,7 +2447,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
+ case FL_PM_SUSPENDED:
+ break;
+ }
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ }
+
+ /* Unlock the chips again */
+@@ -2457,7 +2456,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_PM_SUSPENDED) {
+ /* No need to force it into a known state here,
+@@ -2467,7 +2466,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
+ chip->oldstate = FL_READY;
+ wake_up(&chip->wq);
+ }
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ }
+ }
+
+@@ -2508,7 +2507,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
+
+ chip = &cfi->chips[i];
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ /* Go to known state. Chip may have been power cycled */
+ if (chip->state == FL_PM_SUSPENDED) {
+@@ -2517,7 +2516,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
+ wake_up(&chip->wq);
+ }
+
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ }
+
+ if ((mtd->flags & MTD_POWERUP_LOCK)
+@@ -2537,13 +2536,13 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
+ /* force the completion of any ongoing operation
+ and switch to array mode so any bootloader in
+ flash is accessible for soft reboot. */
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
+ if (!ret) {
+ map_write(map, CMD(0xff), chip->start);
+ chip->state = FL_SHUTDOWN;
+ }
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ }
+
+ return 0;
+diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
+index d74ec46..090c394 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -563,12 +563,12 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return -EIO;
+ }
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ /* Someone else might have been playing with it. */
+ goto retry;
+ }
+@@ -618,9 +618,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+ return -EIO;
+ }
+
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
+ So we can just loop here. */
+ }
+@@ -644,10 +644,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+ sleep:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ goto resettime;
+ }
+ }
+@@ -779,7 +779,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
+ (void) map_read(map, adr);
+ xip_iprefetch();
+ local_irq_enable();
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ xip_iprefetch();
+ cond_resched();
+
+@@ -789,15 +789,15 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
+ * a suspended erase state. If so let's wait
+ * until it's done.
+ */
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ while (chip->state != FL_XIP_WHILE_ERASING) {
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ }
+ /* Disallow XIP again */
+ local_irq_disable();
+@@ -859,17 +859,17 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
+
+ #define UDELAY(map, chip, adr, usec) \
+ do { \
+- spin_unlock(chip->mutex); \
++ mutex_unlock(&chip->mutex); \
+ cfi_udelay(usec); \
+- spin_lock(chip->mutex); \
++ mutex_lock(&chip->mutex); \
+ } while (0)
+
+ #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
+ do { \
+- spin_unlock(chip->mutex); \
++ mutex_unlock(&chip->mutex); \
+ INVALIDATE_CACHED_RANGE(map, adr, len); \
+ cfi_udelay(usec); \
+- spin_lock(chip->mutex); \
++ mutex_lock(&chip->mutex); \
+ } while (0)
+
+ #endif
+@@ -885,10 +885,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, cmd_addr, FL_READY);
+ if (ret) {
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -901,7 +901,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+
+ put_chip(map, chip, cmd_addr);
+
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return 0;
+ }
+
+@@ -955,7 +955,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ retry:
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ if (chip->state != FL_READY){
+ #if 0
+@@ -964,7 +964,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+@@ -993,7 +993,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
+ cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+
+ wake_up(&chip->wq);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+
+ return 0;
+ }
+@@ -1062,10 +1062,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+
+ adr += chip->start;
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_WRITING);
+ if (ret) {
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -1108,11 +1108,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ continue;
+ }
+
+@@ -1144,7 +1144,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+ op_done:
+ chip->state = FL_READY;
+ put_chip(map, chip, adr);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+
+ return ret;
+ }
+@@ -1176,7 +1176,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+ map_word tmp_buf;
+
+ retry:
+- spin_lock(cfi->chips[chipnum].mutex);
++ mutex_lock(&cfi->chips[chipnum].mutex);
+
+ if (cfi->chips[chipnum].state != FL_READY) {
+ #if 0
+@@ -1185,7 +1185,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&cfi->chips[chipnum].wq, &wait);
+
+- spin_unlock(cfi->chips[chipnum].mutex);
++ mutex_unlock(&cfi->chips[chipnum].mutex);
+
+ schedule();
+ remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
+@@ -1199,7 +1199,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+ /* Load 'tmp_buf' with old contents of flash */
+ tmp_buf = map_read(map, bus_ofs+chipstart);
+
+- spin_unlock(cfi->chips[chipnum].mutex);
++ mutex_unlock(&cfi->chips[chipnum].mutex);
+
+ /* Number of bytes to copy from buffer */
+ n = min_t(int, len, map_bankwidth(map)-i);
+@@ -1254,7 +1254,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+ map_word tmp_buf;
+
+ retry1:
+- spin_lock(cfi->chips[chipnum].mutex);
++ mutex_lock(&cfi->chips[chipnum].mutex);
+
+ if (cfi->chips[chipnum].state != FL_READY) {
+ #if 0
+@@ -1263,7 +1263,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&cfi->chips[chipnum].wq, &wait);
+
+- spin_unlock(cfi->chips[chipnum].mutex);
++ mutex_unlock(&cfi->chips[chipnum].mutex);
+
+ schedule();
+ remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
+@@ -1276,7 +1276,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+
+ tmp_buf = map_read(map, ofs + chipstart);
+
+- spin_unlock(cfi->chips[chipnum].mutex);
++ mutex_unlock(&cfi->chips[chipnum].mutex);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
+
+@@ -1311,10 +1311,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+ adr += chip->start;
+ cmd_adr = adr;
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_WRITING);
+ if (ret) {
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -1369,11 +1369,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ continue;
+ }
+
+@@ -1401,7 +1401,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+ op_done:
+ chip->state = FL_READY;
+ put_chip(map, chip, adr);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+
+ return ret;
+ }
+@@ -1501,10 +1501,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
+
+ adr = cfi->addr_unlock1;
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_WRITING);
+ if (ret) {
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -1537,10 +1537,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
+ /* Someone's suspended the erase. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ continue;
+ }
+ if (chip->erase_suspended) {
+@@ -1574,7 +1574,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
+ chip->state = FL_READY;
+ xip_enable(map, chip, adr);
+ put_chip(map, chip, adr);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+
+ return ret;
+ }
+@@ -1589,10 +1589,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+
+ adr += chip->start;
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_ERASING);
+ if (ret) {
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -1625,10 +1625,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+ /* Someone's suspended the erase. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ continue;
+ }
+ if (chip->erase_suspended) {
+@@ -1664,7 +1664,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+
+ chip->state = FL_READY;
+ put_chip(map, chip, adr);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -1716,7 +1716,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+ if (ret)
+ goto out_unlock;
+@@ -1742,7 +1742,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
+ ret = 0;
+
+ out_unlock:
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -1752,7 +1752,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
+ if (ret)
+ goto out_unlock;
+@@ -1770,7 +1770,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
+ ret = 0;
+
+ out_unlock:
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -1798,7 +1798,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
+ chip = &cfi->chips[i];
+
+ retry:
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ switch(chip->state) {
+ case FL_READY:
+@@ -1812,7 +1812,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
+ * with the chip now anyway.
+ */
+ case FL_SYNCING:
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ break;
+
+ default:
+@@ -1820,7 +1820,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+
+ schedule();
+
+@@ -1835,13 +1835,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_SYNCING) {
+ chip->state = chip->oldstate;
+ wake_up(&chip->wq);
+ }
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ }
+ }
+
+@@ -1857,7 +1857,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ switch(chip->state) {
+ case FL_READY:
+@@ -1877,7 +1877,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
+ ret = -EAGAIN;
+ break;
+ }
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ }
+
+ /* Unlock the chips again */
+@@ -1886,13 +1886,13 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_PM_SUSPENDED) {
+ chip->state = chip->oldstate;
+ wake_up(&chip->wq);
+ }
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ }
+ }
+
+@@ -1911,7 +1911,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
+
+ chip = &cfi->chips[i];
+
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_PM_SUSPENDED) {
+ chip->state = FL_READY;
+@@ -1921,7 +1921,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
+ else
+ printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
+
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ }
+ }
+
+diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
+index d4714dd..0322c64 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0020.c
++++ b/drivers/mtd/chips/cfi_cmdset_0020.c
+@@ -265,7 +265,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+
+ timeo = jiffies + HZ;
+ retry:
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us.
+ * If it's in FL_ERASING state, suspend it and make it talk now.
+@@ -296,15 +296,15 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+ /* make sure we're in 'read status' mode */
+ map_write(map, CMD(0x70), cmd_addr);
+ chip->state = FL_ERASING;
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "Chip not ready after erase "
+ "suspended: status = 0x%lx\n", status.x[0]);
+ return -EIO;
+ }
+
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+ }
+
+ suspended = 1;
+@@ -335,13 +335,13 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+@@ -351,7 +351,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+@@ -376,7 +376,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
+ }
+
+ wake_up(&chip->wq);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return 0;
+ }
+
+@@ -445,7 +445,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+ #ifdef DEBUG_CFI_FEATURES
+ printk("%s: chip->state[%d]\n", __func__, chip->state);
+ #endif
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us.
+ * Later, we can actually think about interrupting it
+@@ -470,14 +470,14 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+ break;
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
+ status.x[0], map_read(map, cmd_adr).x[0]);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+@@ -486,7 +486,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+@@ -503,16 +503,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ if (++z > 100) {
+ /* Argh. Not ready for write to buffer */
+ DISABLE_VPP(map);
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
+ return -EIO;
+ }
+@@ -532,9 +532,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+ map_write(map, CMD(0xd0), cmd_adr);
+ chip->state = FL_WRITING;
+
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(chip->buffer_write_time);
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ timeo = jiffies + (HZ/2);
+ z = 0;
+@@ -543,11 +543,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+ /* Someone's suspended the write. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+ continue;
+ }
+
+@@ -563,16 +563,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ DISABLE_VPP(map);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ z++;
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+ }
+ if (!z) {
+ chip->buffer_write_time--;
+@@ -596,11 +596,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+ /* put back into read status register mode */
+ map_write(map, CMD(0x70), adr);
+ wake_up(&chip->wq);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
+ }
+ wake_up(&chip->wq);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+
+ return 0;
+ }
+@@ -749,7 +749,7 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
+
+ timeo = jiffies + HZ;
+ retry:
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us. */
+ switch (chip->state) {
+@@ -766,13 +766,13 @@ retry:
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+@@ -781,7 +781,7 @@ retry:
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+@@ -797,9 +797,9 @@ retry:
+ map_write(map, CMD(0xD0), adr);
+ chip->state = FL_ERASING;
+
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ msleep(1000);
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ /* FIXME. Use a timer to check this, and return immediately. */
+ /* Once the state machine's known to be working I'll do that */
+@@ -810,11 +810,11 @@ retry:
+ /* Someone's suspended the erase. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ*20); /* FIXME */
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+ continue;
+ }
+
+@@ -828,14 +828,14 @@ retry:
+ chip->state = FL_STATUS;
+ printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ DISABLE_VPP(map);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+ }
+
+ DISABLE_VPP(map);
+@@ -878,7 +878,7 @@ retry:
+ printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
+ timeo = jiffies + HZ;
+ chip->state = FL_STATUS;
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ goto retry;
+ }
+ printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
+@@ -887,7 +887,7 @@ retry:
+ }
+
+ wake_up(&chip->wq);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -995,7 +995,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
+ chip = &cfi->chips[i];
+
+ retry:
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ switch(chip->state) {
+ case FL_READY:
+@@ -1009,7 +1009,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
+ * with the chip now anyway.
+ */
+ case FL_SYNCING:
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ break;
+
+ default:
+@@ -1017,7 +1017,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+
+@@ -1030,13 +1030,13 @@ static void cfi_staa_sync (struct mtd_info *mtd)
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_SYNCING) {
+ chip->state = chip->oldstate;
+ wake_up(&chip->wq);
+ }
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ }
+ }
+
+@@ -1054,7 +1054,7 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
+
+ timeo = jiffies + HZ;
+ retry:
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us. */
+ switch (chip->state) {
+@@ -1071,13 +1071,13 @@ retry:
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+@@ -1086,7 +1086,7 @@ retry:
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+@@ -1098,9 +1098,9 @@ retry:
+ map_write(map, CMD(0x01), adr);
+ chip->state = FL_LOCKING;
+
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ msleep(1000);
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ /* FIXME. Use a timer to check this, and return immediately. */
+ /* Once the state machine's known to be working I'll do that */
+@@ -1118,21 +1118,21 @@ retry:
+ chip->state = FL_STATUS;
+ printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ DISABLE_VPP(map);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+ }
+
+ /* Done and happy. */
+ chip->state = FL_STATUS;
+ DISABLE_VPP(map);
+ wake_up(&chip->wq);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return 0;
+ }
+ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+@@ -1203,7 +1203,7 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
+
+ timeo = jiffies + HZ;
+ retry:
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ /* Check that the chip's ready to talk to us. */
+ switch (chip->state) {
+@@ -1220,13 +1220,13 @@ retry:
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+@@ -1235,7 +1235,7 @@ retry:
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+@@ -1247,9 +1247,9 @@ retry:
+ map_write(map, CMD(0xD0), adr);
+ chip->state = FL_UNLOCKING;
+
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ msleep(1000);
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ /* FIXME. Use a timer to check this, and return immediately. */
+ /* Once the state machine's known to be working I'll do that */
+@@ -1267,21 +1267,21 @@ retry:
+ chip->state = FL_STATUS;
+ printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ DISABLE_VPP(map);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the unlock, wait a while and retry */
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ cfi_udelay(1);
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+ }
+
+ /* Done and happy. */
+ chip->state = FL_STATUS;
+ DISABLE_VPP(map);
+ wake_up(&chip->wq);
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return 0;
+ }
+ static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+@@ -1334,7 +1334,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ switch(chip->state) {
+ case FL_READY:
+@@ -1354,7 +1354,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
+ ret = -EAGAIN;
+ break;
+ }
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ }
+
+ /* Unlock the chips again */
+@@ -1363,7 +1363,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ if (chip->state == FL_PM_SUSPENDED) {
+ /* No need to force it into a known state here,
+@@ -1372,7 +1372,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
+ chip->state = chip->oldstate;
+ wake_up(&chip->wq);
+ }
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ }
+ }
+
+@@ -1390,7 +1390,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
+
+ chip = &cfi->chips[i];
+
+- spin_lock_bh(chip->mutex);
++ mutex_lock(&chip->mutex);
+
+ /* Go to known state. Chip may have been power cycled */
+ if (chip->state == FL_PM_SUSPENDED) {
+@@ -1399,7 +1399,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
+ wake_up(&chip->wq);
+ }
+
+- spin_unlock_bh(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ }
+ }
+
+diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
+index ab44f2b..329906b 100644
+--- a/drivers/mtd/chips/fwh_lock.h
++++ b/drivers/mtd/chips/fwh_lock.h
+@@ -58,10 +58,10 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
+ * to flash memory - that means that we don't have to check status
+ * and timeout.
+ */
+- spin_lock(chip->mutex);
++ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr, FL_LOCKING);
+ if (ret) {
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+@@ -72,7 +72,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
+ /* Done and happy. */
+ chip->state = chip->oldstate;
+ put_chip(map, chip, adr);
+- spin_unlock(chip->mutex);
++ mutex_unlock(&chip->mutex);
+ return 0;
+ }
+
+diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
+index e2dc964..fcc1bc0 100644
+--- a/drivers/mtd/chips/gen_probe.c
++++ b/drivers/mtd/chips/gen_probe.c
+@@ -155,8 +155,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
+ pchip->start = (i << cfi.chipshift);
+ pchip->state = FL_READY;
+ init_waitqueue_head(&pchip->wq);
+- spin_lock_init(&pchip->_spinlock);
+- pchip->mutex = &pchip->_spinlock;
++ mutex_init(&pchip->mutex);
+ }
+ }
+
+diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
+index d4f38c5..bfc2f1a 100644
+--- a/include/linux/mtd/flashchip.h
++++ b/include/linux/mtd/flashchip.h
+@@ -15,6 +15,7 @@
+ * has asm/spinlock.h, or 2.4, which has linux/spinlock.h
+ */
+ #include <linux/sched.h>
++#include <linux/mutex.h>
+
+ typedef enum {
+ FL_READY,
+@@ -65,8 +66,7 @@ struct flchip {
+ unsigned int erase_suspended:1;
+ unsigned long in_progress_block_addr;
+
+- spinlock_t *mutex;
+- spinlock_t _spinlock; /* We do it like this because sometimes they'll be shared. */
++ struct mutex mutex;
+ wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
+ to be ready */
+ int word_write_time;
+--
+1.7.3.4
+