old version maemo13
author    tanner <tanner@mach.kyb.local>
          Thu, 15 Apr 2010 09:49:24 +0000 (11:49 +0200)
committer tanner <tanner@mach.kyb.local>
          Thu, 15 Apr 2010 09:49:24 +0000 (11:49 +0200)
kernel-maemo-2.6.28/debian/changelog
kernel-maemo-2.6.28/debian/control
kernel-maemo-2.6.28/debian/kernel-modules-maemo.postrm
kernel-maemo-2.6.28/debian/patches/2.6.28.10.diff [new file with mode: 0644]
kernel-maemo-2.6.28/debian/patches/block2mtd-yoush.diff [new file with mode: 0644]
kernel-maemo-2.6.28/debian/patches/gentoo-fsfixes.diff [new file with mode: 0644]
kernel-maemo-2.6.28/debian/patches/nokia-swapfile.diff [new file with mode: 0644]
kernel-maemo-2.6.28/debian/patches/series
kernel-maemo-2.6.28/debian/rx51maemo_defconfig

index 40e7715..e0f6998 100644
@@ -1,3 +1,17 @@
+kernel-maemo (2.6.28-maemo13) fremantle; urgency=low
+
+  * control: rename kernel-module* to kernel-feature*, add qos
+  * change my email address
+  * update to 2.6.28.10 (minus existing Nokia patches)
+  * clean modules dir when uninstalling
+  * enable mtdblock, block2mtd, nandsim modules
+  * enable large file support for mounting standard ext4
+  * disable NILFS2 (not useful for flash devices)
+  * patch block2mtd for mounting ubifs rootfs image (Nikita V. Youshchenko)
+  * add ext4 and jbd filesystem patches from Gentoo
+
+ -- Thomas Tanner <maemo@tannerlab.com>  Fri, 19 Mar 2010 00:00:00 +0100
+
 kernel-maemo (2.6.28-maemo12) fremantle; urgency=low
 
   * enabled g_serial and usb ethernet gadget
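
The block2mtd patch and the switch to recommending mtd-utils are aimed at mounting a UBIFS rootfs image that sits on an ordinary block device. One possible flow with this kernel, sketched only for illustration (the device path, erase size and volume name below are assumptions, not taken from this commit):

    # expose the block device (or a loop device holding the image) as mtd0
    modprobe block2mtd block2mtd=/dev/mmcblk0p4,128KiB
    # attach mtd0 as ubi0 and mount the UBIFS volume (ubiattach comes from mtd-utils)
    ubiattach /dev/ubi_ctrl -m 0
    mount -t ubifs ubi0:rootfs /mnt/alt-root
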
index 8d6d267..0bb2ed0 100644
@@ -1,9 +1,9 @@
 Source: kernel-maemo
 Section: utils
 Priority: optional
-Maintainer: Thomas Tanner <tanner@maemory.com>
+Maintainer: Thomas Tanner <maemo@tannerlab.com>
+XSBC-Bugtracker: mailto:maemo-bugs@tannerlab.com
 XSBC-Original-Maintainer:  Yauheni Kaliuta <yauheni.kaliuta@nokia.com>
-XSBC-Bugtracker: mailto:tanner@maemory.com
 Build-Depends: debhelper (>= 4.0.0), quilt, sdk-fiasco-gen
 Build-Depends-Indep: bzip2
 Standards-Version: 3.8.0
@@ -17,19 +17,19 @@ Package: kernel-flasher-maemo
 Section: user/system
 Architecture: armel
 Pre-Depends: kernel-maemo (= ${binary:Version}), kernel-modules-maemo (= ${binary:Version}), softupd (>= 0.4.0)
-Recommends: usbip, iptables, nilfs-utils
-Description: Linux kernel updater for an enhanced Maemo 5 kernel
+Recommends: usbip, iptables, mtd-utils
+Description: Linux kernel updater for an enhanced Maemo 5 kernel 2.6.28.10
  This package will flash the kernel image upon installation
  and eat kernel's files from /boot. If you want to revert to the stock
  kernel, run "apt-get install --reinstall kernel kernel-flasher" as root.
  .
  This custom kernel contains additional modules for IPv6, packet filtering,
- NAT, tunneling, kernel configuration, Wifi mesh networking,
+ QoS, NAT, tunneling, kernel configuration, Wifi mesh networking,
  builtin ext3 for booting from other media, ext4, XFS, reiserfs,
- NILFS2, NTFS read support, ISO9660, UDF, CIFS, automounter,
+ NTFS read support, ISO9660, UDF, CIFS, automounter,
  UNIONFS, device mapper and dm-loop, cryptography, cryptoloop,
- EFI partitions, UTF8 codepages, mouse+joystick input,
- USB/IP and generic USB device drivers.
+ EFI partitions, UTF8 codepages, mouse+joystick input, PPP,
+ serial support, USB/IP and generic USB device drivers.
 XB-Maemo-Icon-26:
  iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAABmJLR0QAAAAA
  AAD5Q7t/AAAACXBIWXMAAABIAAAASABGyWs+AAAACXZwQWcAAAAwAAAAMADO
@@ -115,9 +115,9 @@ Description: Linux Kernel Headers for development
 Package: kernel-modules-maemo
 Architecture: armel
 Depends: module-init-tools (>= 3.3), kernel-maemo (= ${binary:Version})
-Provides: kernel-modules-netfilter, kernel-modules-ipv6, kernel-modules-ext4, kernel-modules-xfs, kernel-modules-reiserfs,
-  kernel-modules-nilfs2, kernel-modules-ntfs-read, kernel-modules-udf, kernel-modules-cifs, kernel-modules-automounter,
-  kernel-modules-unionfs, kernel-modules-crypto, kernel-modules-dmloop, kernel-modules-utf8, kernel-modules-mousejoy,
-  kernel-modules-usbip
+Provides: kernel-feature-netfilter, kernel-feature-ipv6, kernel-feature-ext4, kernel-feature-xfs, kernel-feature-reiserfs,
+  kernel-feature-ntfs-read, kernel-feature-udf, kernel-feature-cifs, kernel-feature-automounter,
+  kernel-feature-unionfs, kernel-feature-crypto, kernel-feature-dmloop, kernel-feature-utf8, kernel-feature-mousejoy,
+  kernel-feature-usbip, kernel-feature-ppp, kernel-feature-qos, kernel-feature-block2mtd
 Description: Linux kernel modules
- This package provides kernel modules
+ This package provides kernel modules for the enhanced Maemo 5 kernel 2.6.28.10
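
Because the old kernel-modules-* virtual names are dropped from Provides, packages that depended on them need the corresponding kernel-feature-* names once this kernel is installed. A quick way to list what the installed package now advertises (only the package name comes from this diff; the query itself is standard dpkg and shown as a sketch):

    dpkg-query -W -f='${Provides}\n' kernel-modules-maemo | tr ',' '\n'
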
index 9c48027..2acfddc 100644
@@ -2,11 +2,7 @@
 
 set -e
 
-cd /lib/modules
-
-ls -1d * | grep -v current | while read i; do
-   /sbin/depmod $i || :
-done
+rm /lib/modules/2.6.28.10maemo-omap1/modules.*
 
 #DEBHELPER#
 
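Note that the script runs under set -e, so the bare rm aborts the postrm if the generated modules.* index files are already gone (the unmatched glob is passed to rm literally and rm fails). A slightly more defensive variant, shown only as a sketch and not part of this commit:

    rm -f /lib/modules/2.6.28.10maemo-omap1/modules.* || :
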
diff --git a/kernel-maemo-2.6.28/debian/patches/2.6.28.10.diff b/kernel-maemo-2.6.28/debian/patches/2.6.28.10.diff
new file mode 100644
index 0000000..38c69cc
--- /dev/null
@@ -0,0 +1,25050 @@
+--- kernel-maemo-2.6.28.test.orig/Documentation/filesystems/sysfs-pci.txt
++++ kernel-maemo-2.6.28.test/Documentation/filesystems/sysfs-pci.txt
+@@ -9,6 +9,7 @@
+      |   |-- class
+      |   |-- config
+      |   |-- device
++     |   |-- enable
+      |   |-- irq
+      |   |-- local_cpus
+      |   |-- resource
+@@ -32,6 +33,7 @@
+        class             PCI class (ascii, ro)
+        config            PCI config space (binary, rw)
+        device            PCI device (ascii, ro)
++       enable            Whether the device is enabled (ascii, rw)
+        irq               IRQ number (ascii, ro)
+        local_cpus        nearby CPU mask (cpumask, ro)
+        resource                  PCI resource host addresses (ascii, ro)
+@@ -57,10 +59,19 @@
+ don't support mmapping of certain resources, so be sure to check the return
+ value from any attempted mmap.
++The 'enable' file provides a counter that indicates how many times the device
++has been enabled.  If the 'enable' file currently returns '4', and a '1' is
++echoed into it, it will then return '5'.  Echoing a '0' into it will decrease
++the count.  Even when it returns to 0, though, some of the initialisation
++may not be reversed.
++
+ The 'rom' file is special in that it provides read-only access to the device's
+ ROM file, if available.  It's disabled by default, however, so applications
+ should write the string "1" to the file to enable it before attempting a read
+-call, and disable it following the access by writing "0" to the file.
++call, and disable it following the access by writing "0" to the file.  Note
++that the device must be enabled for a rom read to return data succesfully.
++In the event a driver is not bound to the device, it can be enabled using the
++'enable' file, documented above.
+ Accessing legacy resources through sysfs
+ ----------------------------------------
+--- kernel-maemo-2.6.28.test.orig/Documentation/sound/alsa/ALSA-Configuration.txt
++++ kernel-maemo-2.6.28.test/Documentation/sound/alsa/ALSA-Configuration.txt
+@@ -979,9 +979,10 @@
+         6stack        6-jack, separate surrounds (default)
+         3stack        3-stack, shared surrounds
+         laptop        2-channel only (FSC V2060, Samsung M50)
+-        laptop-eapd   2-channel with EAPD (Samsung R65, ASUS A6J)
++        laptop-eapd   2-channel with EAPD (ASUS A6J)
+         laptop-automute 2-channel with EAPD and HP-automute (Lenovo N100)
+         ultra         2-channel with EAPD (Samsung Ultra tablet PC)
++        samsung       2-channel with EAPD (Samsung R65)
+       AD1988/AD1988B/AD1989A/AD1989B
+         6stack        6-jack
+--- kernel-maemo-2.6.28.test.orig/Makefile
++++ kernel-maemo-2.6.28.test/Makefile
+@@ -1,7 +1,7 @@
+ VERSION = 2
+ PATCHLEVEL = 6
+ SUBLEVEL = 28
+-EXTRAVERSION = maemo
++EXTRAVERSION = .10maemo
+ NAME = Erotic Pickled Herring
+ # *DOCUMENTATION*
+@@ -560,6 +560,9 @@
+ # disable pointer signed / unsigned warnings in gcc 4.0
+ KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,)
++# disable invalid "can't wrap" optimzations for signed / pointers
++KBUILD_CFLAGS += $(call cc-option,-fwrapv)
++
+ # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
+ # But warn user when we do so
+ warn-assign = \
+--- kernel-maemo-2.6.28.test.orig/arch/Kconfig
++++ kernel-maemo-2.6.28.test/arch/Kconfig
+@@ -60,6 +60,9 @@
+         See Documentation/unaligned-memory-access.txt for more
+         information on the topic of unaligned memory accesses.
++config HAVE_SYSCALL_WRAPPERS
++      bool
++
+ config KRETPROBES
+       def_bool y
+       depends on KPROBES && HAVE_KRETPROBES
+--- kernel-maemo-2.6.28.test.orig/arch/alpha/kernel/entry.S
++++ kernel-maemo-2.6.28.test/arch/alpha/kernel/entry.S
+@@ -894,9 +894,9 @@
+ .end sys_getxpid
+       .align  4
+-      .globl  sys_pipe
+-      .ent    sys_pipe
+-sys_pipe:
++      .globl  sys_alpha_pipe
++      .ent    sys_alpha_pipe
++sys_alpha_pipe:
+       lda     $sp, -16($sp)
+       stq     $26, 0($sp)
+       .prologue 0
+@@ -914,7 +914,7 @@
+       stq     $1, 80+16($sp)
+ 1:    lda     $sp, 16($sp)
+       ret
+-.end sys_pipe
++.end sys_alpha_pipe
+       .align  4
+       .globl  sys_execve
+--- kernel-maemo-2.6.28.test.orig/arch/alpha/kernel/irq_srm.c
++++ kernel-maemo-2.6.28.test/arch/alpha/kernel/irq_srm.c
+@@ -63,6 +63,8 @@
+ {
+       long i;
++      if (NR_IRQS <= 16)
++              return;
+       for (i = 16; i < max; ++i) {
+               if (i < 64 && ((ignore_mask >> i) & 1))
+                       continue;
+--- kernel-maemo-2.6.28.test.orig/arch/alpha/kernel/systbls.S
++++ kernel-maemo-2.6.28.test/arch/alpha/kernel/systbls.S
+@@ -52,7 +52,7 @@
+       .quad sys_setpgid
+       .quad alpha_ni_syscall                  /* 40 */
+       .quad sys_dup
+-      .quad sys_pipe
++      .quad sys_alpha_pipe
+       .quad osf_set_program_attributes
+       .quad alpha_ni_syscall
+       .quad sys_open                          /* 45 */
+--- kernel-maemo-2.6.28.test.orig/arch/arm/kernel/calls.S
++++ kernel-maemo-2.6.28.test/arch/arm/kernel/calls.S
+@@ -98,7 +98,7 @@
+               CALL(sys_uselib)
+               CALL(sys_swapon)
+               CALL(sys_reboot)
+-              CALL(OBSOLETE(old_readdir))     /* used by libc4 */
++              CALL(OBSOLETE(sys_old_readdir)) /* used by libc4 */
+ /* 90 */      CALL(OBSOLETE(old_mmap))        /* used by libc4 */
+               CALL(sys_munmap)
+               CALL(sys_truncate)
+--- kernel-maemo-2.6.28.test.orig/arch/arm/mach-rpc/riscpc.c
++++ kernel-maemo-2.6.28.test/arch/arm/mach-rpc/riscpc.c
+@@ -19,6 +19,7 @@
+ #include <linux/serial_8250.h>
+ #include <linux/ata_platform.h>
+ #include <linux/io.h>
++#include <linux/i2c.h>
+ #include <asm/elf.h>
+ #include <asm/mach-types.h>
+@@ -201,8 +202,13 @@
+       &pata_device,
+ };
++static struct i2c_board_info i2c_rtc = {
++      I2C_BOARD_INFO("pcf8583", 0x50)
++};
++
+ static int __init rpc_init(void)
+ {
++      i2c_register_board_info(0, &i2c_rtc, 1);
+       return platform_add_devices(devs, ARRAY_SIZE(devs));
+ }
+--- kernel-maemo-2.6.28.test.orig/arch/cris/arch-v10/kernel/entry.S
++++ kernel-maemo-2.6.28.test/arch/cris/arch-v10/kernel/entry.S
+@@ -691,7 +691,7 @@
+       .long sys_uselib
+       .long sys_swapon
+       .long sys_reboot
+-      .long old_readdir
++      .long sys_old_readdir
+       .long old_mmap          /* 90 */
+       .long sys_munmap
+       .long sys_truncate
+--- kernel-maemo-2.6.28.test.orig/arch/cris/arch-v32/kernel/entry.S
++++ kernel-maemo-2.6.28.test/arch/cris/arch-v32/kernel/entry.S
+@@ -614,7 +614,7 @@
+       .long sys_uselib
+       .long sys_swapon
+       .long sys_reboot
+-      .long old_readdir
++      .long sys_old_readdir
+       .long old_mmap          /* 90 */
+       .long sys_munmap
+       .long sys_truncate
+--- kernel-maemo-2.6.28.test.orig/arch/h8300/kernel/syscalls.S
++++ kernel-maemo-2.6.28.test/arch/h8300/kernel/syscalls.S
+@@ -103,7 +103,7 @@
+       .long SYMBOL_NAME(sys_uselib)
+       .long SYMBOL_NAME(sys_swapon)
+       .long SYMBOL_NAME(sys_reboot)
+-      .long SYMBOL_NAME(old_readdir)
++      .long SYMBOL_NAME(sys_old_readdir)
+       .long SYMBOL_NAME(old_mmap)             /* 90 */
+       .long SYMBOL_NAME(sys_munmap)
+       .long SYMBOL_NAME(sys_truncate)
+--- kernel-maemo-2.6.28.test.orig/arch/ia64/Kconfig
++++ kernel-maemo-2.6.28.test/arch/ia64/Kconfig
+@@ -17,6 +17,7 @@
+       select ACPI if (!IA64_HP_SIM)
+       select PM if (!IA64_HP_SIM)
+       select ARCH_SUPPORTS_MSI
++      select HAVE_UNSTABLE_SCHED_CLOCK
+       select HAVE_IDE
+       select HAVE_OPROFILE
+       select HAVE_KPROBES
+@@ -478,8 +479,7 @@
+       default y if VIRTUAL_MEM_MAP
+ config HAVE_ARCH_EARLY_PFN_TO_NID
+-      def_bool y
+-      depends on NEED_MULTIPLE_NODES
++      def_bool NUMA && SPARSEMEM
+ config HAVE_ARCH_NODEDATA_EXTENSION
+       def_bool y
+--- kernel-maemo-2.6.28.test.orig/arch/ia64/ia32/ia32_entry.S
++++ kernel-maemo-2.6.28.test/arch/ia64/ia32/ia32_entry.S
+@@ -220,7 +220,7 @@
+       data8 sys_mkdir
+       data8 sys_rmdir           /* 40 */
+       data8 sys_dup
+-      data8 sys_pipe
++      data8 sys_ia64_pipe
+       data8 compat_sys_times
+       data8 sys_ni_syscall      /* old prof syscall holder */
+       data8 sys32_brk           /* 45 */
+--- kernel-maemo-2.6.28.test.orig/arch/ia64/include/asm/mmzone.h
++++ kernel-maemo-2.6.28.test/arch/ia64/include/asm/mmzone.h
+@@ -31,10 +31,6 @@
+ #endif
+ }
+-#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+-extern int early_pfn_to_nid(unsigned long pfn);
+-#endif
+-
+ #ifdef CONFIG_IA64_DIG /* DIG systems are small */
+ # define MAX_PHYSNODE_ID      8
+ # define NR_NODE_MEMBLKS      (MAX_NUMNODES * 8)
+--- kernel-maemo-2.6.28.test.orig/arch/ia64/include/asm/unistd.h
++++ kernel-maemo-2.6.28.test/arch/ia64/include/asm/unistd.h
+@@ -364,7 +364,7 @@
+ struct sigaction;
+ long sys_execve(char __user *filename, char __user * __user *argv,
+                          char __user * __user *envp, struct pt_regs *regs);
+-asmlinkage long sys_pipe(void);
++asmlinkage long sys_ia64_pipe(void);
+ asmlinkage long sys_rt_sigaction(int sig,
+                                const struct sigaction __user *act,
+                                struct sigaction __user *oact,
+--- kernel-maemo-2.6.28.test.orig/arch/ia64/kernel/entry.S
++++ kernel-maemo-2.6.28.test/arch/ia64/kernel/entry.S
+@@ -1442,7 +1442,7 @@
+       data8 sys_mkdir                         // 1055
+       data8 sys_rmdir
+       data8 sys_dup
+-      data8 sys_pipe
++      data8 sys_ia64_pipe
+       data8 sys_times
+       data8 ia64_brk                          // 1060
+       data8 sys_setgid
+--- kernel-maemo-2.6.28.test.orig/arch/ia64/kernel/sys_ia64.c
++++ kernel-maemo-2.6.28.test/arch/ia64/kernel/sys_ia64.c
+@@ -154,7 +154,7 @@
+  * and r9) as this is faster than doing a copy_to_user().
+  */
+ asmlinkage long
+-sys_pipe (void)
++sys_ia64_pipe (void)
+ {
+       struct pt_regs *regs = task_pt_regs(current);
+       int fd[2];
+--- kernel-maemo-2.6.28.test.orig/arch/ia64/mm/numa.c
++++ kernel-maemo-2.6.28.test/arch/ia64/mm/numa.c
+@@ -58,7 +58,7 @@
+  * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
+  * the section resides.
+  */
+-int early_pfn_to_nid(unsigned long pfn)
++int __meminit __early_pfn_to_nid(unsigned long pfn)
+ {
+       int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
+@@ -70,7 +70,7 @@
+                       return node_memblk[i].nid;
+       }
+-      return 0;
++      return -1;
+ }
+ #ifdef CONFIG_MEMORY_HOTPLUG
+--- kernel-maemo-2.6.28.test.orig/arch/ia64/sn/kernel/io_acpi_init.c
++++ kernel-maemo-2.6.28.test/arch/ia64/sn/kernel/io_acpi_init.c
+@@ -434,7 +434,7 @@
+               size = pci_resource_len(dev, PCI_ROM_RESOURCE);
+               addr = ioremap(pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE],
+                              size);
+-              image_size = pci_get_rom_size(addr, size);
++              image_size = pci_get_rom_size(dev, addr, size);
+               dev->resource[PCI_ROM_RESOURCE].start = (unsigned long) addr;
+               dev->resource[PCI_ROM_RESOURCE].end =
+                                       (unsigned long) addr + image_size - 1;
+--- kernel-maemo-2.6.28.test.orig/arch/ia64/sn/kernel/io_init.c
++++ kernel-maemo-2.6.28.test/arch/ia64/sn/kernel/io_init.c
+@@ -269,7 +269,7 @@
+                       rom = ioremap(pci_resource_start(dev, PCI_ROM_RESOURCE),
+                                     size + 1);
+-                      image_size = pci_get_rom_size(rom, size + 1);
++                      image_size = pci_get_rom_size(dev, rom, size + 1);
+                       dev->resource[PCI_ROM_RESOURCE].end =
+                               dev->resource[PCI_ROM_RESOURCE].start +
+                               image_size - 1;
+--- kernel-maemo-2.6.28.test.orig/arch/m68k/kernel/entry.S
++++ kernel-maemo-2.6.28.test/arch/m68k/kernel/entry.S
+@@ -513,7 +513,7 @@
+       .long sys_uselib
+       .long sys_swapon
+       .long sys_reboot
+-      .long old_readdir
++      .long sys_old_readdir
+       .long old_mmap          /* 90 */
+       .long sys_munmap
+       .long sys_truncate
+--- kernel-maemo-2.6.28.test.orig/arch/m68knommu/Kconfig
++++ kernel-maemo-2.6.28.test/arch/m68knommu/Kconfig
+@@ -14,6 +14,10 @@
+       bool
+       default n
++config NO_DMA
++      bool
++      default y
++
+ config FPU
+       bool
+       default n
+--- kernel-maemo-2.6.28.test.orig/arch/m68knommu/kernel/syscalltable.S
++++ kernel-maemo-2.6.28.test/arch/m68knommu/kernel/syscalltable.S
+@@ -107,7 +107,7 @@
+       .long sys_uselib
+       .long sys_ni_syscall    /* sys_swapon */
+       .long sys_reboot
+-      .long old_readdir
++      .long sys_old_readdir
+       .long old_mmap          /* 90 */
+       .long sys_munmap
+       .long sys_truncate
+--- kernel-maemo-2.6.28.test.orig/arch/mips/include/asm/compat.h
++++ kernel-maemo-2.6.28.test/arch/mips/include/asm/compat.h
+@@ -3,6 +3,8 @@
+ /*
+  * Architecture specific compatibility types
+  */
++#include <linux/seccomp.h>
++#include <linux/thread_info.h>
+ #include <linux/types.h>
+ #include <asm/page.h>
+ #include <asm/ptrace.h>
+@@ -218,4 +220,9 @@
+       compat_ulong_t  __unused2;
+ };
++static inline int is_compat_task(void)
++{
++      return test_thread_flag(TIF_32BIT);
++}
++
+ #endif /* _ASM_COMPAT_H */
+--- kernel-maemo-2.6.28.test.orig/arch/mips/include/asm/seccomp.h
++++ kernel-maemo-2.6.28.test/arch/mips/include/asm/seccomp.h
+@@ -1,6 +1,5 @@
+ #ifndef __ASM_SECCOMP_H
+-#include <linux/thread_info.h>
+ #include <linux/unistd.h>
+ #define __NR_seccomp_read __NR_read
+--- kernel-maemo-2.6.28.test.orig/arch/mips/kernel/scall32-o32.S
++++ kernel-maemo-2.6.28.test/arch/mips/kernel/scall32-o32.S
+@@ -398,7 +398,7 @@
+       sys     sys_uselib              1
+       sys     sys_swapon              2
+       sys     sys_reboot              3
+-      sys     old_readdir             3
++      sys     sys_old_readdir         3
+       sys     old_mmap                6       /* 4090 */
+       sys     sys_munmap              2
+       sys     sys_truncate            2
+--- kernel-maemo-2.6.28.test.orig/arch/mn10300/kernel/entry.S
++++ kernel-maemo-2.6.28.test/arch/mn10300/kernel/entry.S
+@@ -478,7 +478,7 @@
+       .long sys_uselib
+       .long sys_swapon
+       .long sys_reboot
+-      .long old_readdir
++      .long sys_old_readdir
+       .long old_mmap          /* 90 */
+       .long sys_munmap
+       .long sys_truncate
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/Kconfig
++++ kernel-maemo-2.6.28.test/arch/powerpc/Kconfig
+@@ -121,6 +121,7 @@
+       select HAVE_DMA_ATTRS if PPC64
+       select USE_GENERIC_SMP_HELPERS if SMP
+       select HAVE_OPROFILE
++      select HAVE_SYSCALL_WRAPPERS if PPC64
+ config EARLY_PRINTK
+       bool
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/include/asm/compat.h
++++ kernel-maemo-2.6.28.test/arch/powerpc/include/asm/compat.h
+@@ -210,5 +210,10 @@
+       compat_ulong_t __unused6;
+ };
++static inline int is_compat_task(void)
++{
++      return test_thread_flag(TIF_32BIT);
++}
++
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_COMPAT_H */
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/include/asm/futex.h
++++ kernel-maemo-2.6.28.test/arch/powerpc/include/asm/futex.h
+@@ -27,7 +27,7 @@
+       PPC_LONG "1b,4b,2b,4b\n" \
+       ".previous" \
+       : "=&r" (oldval), "=&r" (ret) \
+-      : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
++      : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
+       : "cr0", "memory")
+ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+@@ -47,19 +47,19 @@
+       switch (op) {
+       case FUTEX_OP_SET:
+-              __futex_atomic_op("", ret, oldval, uaddr, oparg);
++              __futex_atomic_op("mr %1,%4\n", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_ADD:
+-              __futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg);
++              __futex_atomic_op("add %1,%0,%4\n", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_OR:
+-              __futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg);
++              __futex_atomic_op("or %1,%0,%4\n", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_ANDN:
+-              __futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg);
++              __futex_atomic_op("andc %1,%0,%4\n", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_XOR:
+-              __futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg);
++              __futex_atomic_op("xor %1,%0,%4\n", ret, oldval, uaddr, oparg);
+               break;
+       default:
+               ret = -ENOSYS;
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/include/asm/processor.h
++++ kernel-maemo-2.6.28.test/arch/powerpc/include/asm/processor.h
+@@ -309,6 +309,25 @@
+ #define HAVE_ARCH_PICK_MMAP_LAYOUT
+ #endif
++#ifdef CONFIG_PPC64
++static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
++{
++      unsigned long sp;
++
++      if (is_32)
++              sp = regs->gpr[1] & 0x0ffffffffUL;
++      else
++              sp = regs->gpr[1];
++
++      return sp;
++}
++#else
++static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
++{
++      return regs->gpr[1];
++}
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* __ASSEMBLY__ */
+ #endif /* _ASM_POWERPC_PROCESSOR_H */
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/include/asm/seccomp.h
++++ kernel-maemo-2.6.28.test/arch/powerpc/include/asm/seccomp.h
+@@ -1,10 +1,6 @@
+ #ifndef _ASM_POWERPC_SECCOMP_H
+ #define _ASM_POWERPC_SECCOMP_H
+-#ifdef __KERNEL__
+-#include <linux/thread_info.h>
+-#endif
+-
+ #include <linux/unistd.h>
+ #define __NR_seccomp_read __NR_read
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/include/asm/systbl.h
++++ kernel-maemo-2.6.28.test/arch/powerpc/include/asm/systbl.h
+@@ -92,7 +92,7 @@
+ SYSCALL(uselib)
+ SYSCALL(swapon)
+ SYSCALL(reboot)
+-SYSX(sys_ni_syscall,compat_sys_old_readdir,old_readdir)
++SYSX(sys_ni_syscall,compat_sys_old_readdir,sys_old_readdir)
+ SYSCALL_SPU(mmap)
+ SYSCALL_SPU(munmap)
+ SYSCALL_SPU(truncate)
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/kernel/align.c
++++ kernel-maemo-2.6.28.test/arch/powerpc/kernel/align.c
+@@ -367,27 +367,24 @@
+ static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
+                          unsigned int flags)
+ {
+-      char *ptr = (char *) &current->thread.TS_FPR(reg);
+-      int i, ret;
++      char *ptr0 = (char *) &current->thread.TS_FPR(reg);
++      char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
++      int i, ret, sw = 0;
+       if (!(flags & F))
+               return 0;
+       if (reg & 1)
+               return 0;       /* invalid form: FRS/FRT must be even */
+-      if (!(flags & SW)) {
+-              /* not byte-swapped - easy */
+-              if (!(flags & ST))
+-                      ret = __copy_from_user(ptr, addr, 16);
+-              else
+-                      ret = __copy_to_user(addr, ptr, 16);
+-      } else {
+-              /* each FPR value is byte-swapped separately */
+-              ret = 0;
+-              for (i = 0; i < 16; ++i) {
+-                      if (!(flags & ST))
+-                              ret |= __get_user(ptr[i^7], addr + i);
+-                      else
+-                              ret |= __put_user(ptr[i^7], addr + i);
++      if (flags & SW)
++              sw = 7;
++      ret = 0;
++      for (i = 0; i < 8; ++i) {
++              if (!(flags & ST)) {
++                      ret |= __get_user(ptr0[i^sw], addr + i);
++                      ret |= __get_user(ptr1[i^sw], addr + i + 8);
++              } else {
++                      ret |= __put_user(ptr0[i^sw], addr + i);
++                      ret |= __put_user(ptr1[i^sw], addr + i + 8);
+               }
+       }
+       if (ret)
+@@ -646,11 +643,16 @@
+                      unsigned int areg, struct pt_regs *regs,
+                      unsigned int flags, unsigned int length)
+ {
+-      char *ptr = (char *) &current->thread.TS_FPR(reg);
++      char *ptr;
+       int ret = 0;
+       flush_vsx_to_thread(current);
++      if (reg < 32)
++              ptr = (char *) &current->thread.TS_FPR(reg);
++      else
++              ptr = (char *) &current->thread.vr[reg - 32];
++
+       if (flags & ST)
+               ret = __copy_to_user(addr, ptr, length);
+         else {
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/kernel/signal.c
++++ kernel-maemo-2.6.28.test/arch/powerpc/kernel/signal.c
+@@ -26,12 +26,12 @@
+  * Allocate space for the signal frame
+  */
+ void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+-                         size_t frame_size)
++                         size_t frame_size, int is_32)
+ {
+         unsigned long oldsp, newsp;
+         /* Default to using normal stack */
+-        oldsp = regs->gpr[1];
++        oldsp = get_clean_sp(regs, is_32);
+       /* Check for alt stack */
+       if ((ka->sa.sa_flags & SA_ONSTACK) &&
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/kernel/signal.h
++++ kernel-maemo-2.6.28.test/arch/powerpc/kernel/signal.h
+@@ -15,7 +15,7 @@
+ extern void do_signal(struct pt_regs *regs, unsigned long thread_info_flags);
+ extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+-                                size_t frame_size);
++                                size_t frame_size, int is_32);
+ extern void restore_sigmask(sigset_t *set);
+ extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/kernel/signal_32.c
++++ kernel-maemo-2.6.28.test/arch/powerpc/kernel/signal_32.c
+@@ -836,7 +836,7 @@
+       /* Set up Signal Frame */
+       /* Put a Real Time Context onto stack */
+-      rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf));
++      rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
+       addr = rt_sf;
+       if (unlikely(rt_sf == NULL))
+               goto badframe;
+@@ -1182,7 +1182,7 @@
+       unsigned long newsp = 0;
+       /* Set up Signal Frame */
+-      frame = get_sigframe(ka, regs, sizeof(*frame));
++      frame = get_sigframe(ka, regs, sizeof(*frame), 1);
+       if (unlikely(frame == NULL))
+               goto badframe;
+       sc = (struct sigcontext __user *) &frame->sctx;
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/kernel/signal_64.c
++++ kernel-maemo-2.6.28.test/arch/powerpc/kernel/signal_64.c
+@@ -402,7 +402,7 @@
+       unsigned long newsp = 0;
+       long err = 0;
+-      frame = get_sigframe(ka, regs, sizeof(*frame));
++      frame = get_sigframe(ka, regs, sizeof(*frame), 0);
+       if (unlikely(frame == NULL))
+               goto badframe;
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/mm/fsl_booke_mmu.c
++++ kernel-maemo-2.6.28.test/arch/powerpc/mm/fsl_booke_mmu.c
+@@ -80,7 +80,7 @@
+ /*
+  * Return PA for this VA if it is mapped by a CAM, or 0
+  */
+-unsigned long v_mapped_by_tlbcam(unsigned long va)
++phys_addr_t v_mapped_by_tlbcam(unsigned long va)
+ {
+       int b;
+       for (b = 0; b < tlbcam_index; ++b)
+@@ -92,7 +92,7 @@
+ /*
+  * Return VA for a given PA or 0 if not mapped
+  */
+-unsigned long p_mapped_by_tlbcam(unsigned long pa)
++unsigned long p_mapped_by_tlbcam(phys_addr_t pa)
+ {
+       int b;
+       for (b = 0; b < tlbcam_index; ++b)
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/mm/pgtable_32.c
++++ kernel-maemo-2.6.28.test/arch/powerpc/mm/pgtable_32.c
+@@ -65,8 +65,8 @@
+ #ifdef HAVE_TLBCAM
+ extern unsigned int tlbcam_index;
+-extern unsigned long v_mapped_by_tlbcam(unsigned long va);
+-extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
++extern phys_addr_t v_mapped_by_tlbcam(unsigned long va);
++extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
+ #else /* !HAVE_TLBCAM */
+ #define v_mapped_by_tlbcam(x) (0UL)
+ #define p_mapped_by_tlbcam(x) (0UL)
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/mm/slice.c
++++ kernel-maemo-2.6.28.test/arch/powerpc/mm/slice.c
+@@ -710,9 +710,18 @@
+                          unsigned long len)
+ {
+       struct slice_mask mask, available;
++      unsigned int psize = mm->context.user_psize;
+       mask = slice_range_to_mask(addr, len);
+-      available = slice_mask_for_size(mm, mm->context.user_psize);
++      available = slice_mask_for_size(mm, psize);
++#ifdef CONFIG_PPC_64K_PAGES
++      /* We need to account for 4k slices too */
++      if (psize == MMU_PAGE_64K) {
++              struct slice_mask compat_mask;
++              compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
++              or_mask(available, compat_mask);
++      }
++#endif
+ #if 0 /* too verbose */
+       slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/platforms/pseries/Kconfig
++++ kernel-maemo-2.6.28.test/arch/powerpc/platforms/pseries/Kconfig
+@@ -54,7 +54,7 @@
+ config CMM
+       tristate "Collaborative memory management"
+-      depends on PPC_SMLPAR
++      depends on PPC_SMLPAR && !CRASH_DUMP
+       default y
+       help
+         Select this option, if you want to enable the kernel interface
+--- kernel-maemo-2.6.28.test.orig/arch/powerpc/sysdev/fsl_soc.c
++++ kernel-maemo-2.6.28.test/arch/powerpc/sysdev/fsl_soc.c
+@@ -257,7 +257,7 @@
+               gfar_mdio_of_init_one(np);
+       /* try the deprecated version */
+-      for_each_compatible_node(np, "mdio", "gianfar");
++      for_each_compatible_node(np, "mdio", "gianfar")
+               gfar_mdio_of_init_one(np);
+       return 0;
+--- kernel-maemo-2.6.28.test.orig/arch/s390/Kconfig
++++ kernel-maemo-2.6.28.test/arch/s390/Kconfig
+@@ -70,6 +70,7 @@
+ config S390
+       def_bool y
++      select HAVE_SYSCALL_WRAPPERS
+       select HAVE_OPROFILE
+       select HAVE_KPROBES
+       select HAVE_KRETPROBES
+--- kernel-maemo-2.6.28.test.orig/arch/s390/kernel/compat_wrapper.S
++++ kernel-maemo-2.6.28.test/arch/s390/kernel/compat_wrapper.S
+@@ -547,7 +547,7 @@
+       .globl  sys32_newuname_wrapper
+ sys32_newuname_wrapper:
+       llgtr   %r2,%r2                 # struct new_utsname *
+-      jg      s390x_newuname          # branch to system call
++      jg      sys_s390_newuname       # branch to system call
+       .globl  compat_sys_adjtimex_wrapper
+ compat_sys_adjtimex_wrapper:
+@@ -615,7 +615,7 @@
+       .globl  sys32_personality_wrapper
+ sys32_personality_wrapper:
+       llgfr   %r2,%r2                 # unsigned long
+-      jg      s390x_personality       # branch to system call
++      jg      sys_s390_personality    # branch to system call
+       .globl  sys32_setfsuid16_wrapper
+ sys32_setfsuid16_wrapper:
+--- kernel-maemo-2.6.28.test.orig/arch/s390/kernel/entry.h
++++ kernel-maemo-2.6.28.test/arch/s390/kernel/entry.h
+@@ -30,23 +30,23 @@
+ struct old_sigaction;
+ struct sel_arg_struct;
+-long sys_pipe(unsigned long __user *fildes);
+ long sys_mmap2(struct mmap_arg_struct __user  *arg);
+-long old_mmap(struct mmap_arg_struct __user *arg);
++long sys_s390_old_mmap(struct mmap_arg_struct __user *arg);
+ long sys_ipc(uint call, int first, unsigned long second,
+            unsigned long third, void __user *ptr);
+-long s390x_newuname(struct new_utsname __user *name);
+-long s390x_personality(unsigned long personality);
+-long s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
++long sys_s390_newuname(struct new_utsname __user *name);
++long sys_s390_personality(unsigned long personality);
++long sys_s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
+                   size_t len, int advice);
+-long s390_fadvise64_64(struct fadvise64_64_args __user *args);
+-long s390_fallocate(int fd, int mode, loff_t offset, u32 len_high, u32 len_low);
++long sys_s390_fadvise64_64(struct fadvise64_64_args __user *args);
++long sys_s390_fallocate(int fd, int mode, loff_t offset, u32 len_high,
++                      u32 len_low);
+ long sys_fork(void);
+ long sys_clone(void);
+ long sys_vfork(void);
+ void execve_tail(void);
+ long sys_execve(void);
+-int sys_sigsuspend(int history0, int history1, old_sigset_t mask);
++long sys_sigsuspend(int history0, int history1, old_sigset_t mask);
+ long sys_sigaction(int sig, const struct old_sigaction __user *act,
+                  struct old_sigaction __user *oact);
+ long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss);
+--- kernel-maemo-2.6.28.test.orig/arch/s390/kernel/process.c
++++ kernel-maemo-2.6.28.test/arch/s390/kernel/process.c
+@@ -38,6 +38,7 @@
+ #include <linux/utsname.h>
+ #include <linux/tick.h>
+ #include <linux/elfcore.h>
++#include <linux/syscalls.h>
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+ #include <asm/system.h>
+@@ -260,13 +261,13 @@
+         return 0;
+ }
+-asmlinkage long sys_fork(void)
++SYSCALL_DEFINE0(fork)
+ {
+       struct pt_regs *regs = task_pt_regs(current);
+       return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL);
+ }
+-asmlinkage long sys_clone(void)
++SYSCALL_DEFINE0(clone)
+ {
+       struct pt_regs *regs = task_pt_regs(current);
+       unsigned long clone_flags;
+@@ -293,7 +294,7 @@
+  * do not have enough call-clobbered registers to hold all
+  * the information you need.
+  */
+-asmlinkage long sys_vfork(void)
++SYSCALL_DEFINE0(vfork)
+ {
+       struct pt_regs *regs = task_pt_regs(current);
+       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
+@@ -313,7 +314,7 @@
+ /*
+  * sys_execve() executes a new program.
+  */
+-asmlinkage long sys_execve(void)
++SYSCALL_DEFINE0(execve)
+ {
+       struct pt_regs *regs = task_pt_regs(current);
+       char *filename;
+--- kernel-maemo-2.6.28.test.orig/arch/s390/kernel/signal.c
++++ kernel-maemo-2.6.28.test/arch/s390/kernel/signal.c
+@@ -25,6 +25,7 @@
+ #include <linux/personality.h>
+ #include <linux/binfmts.h>
+ #include <linux/tracehook.h>
++#include <linux/syscalls.h>
+ #include <asm/ucontext.h>
+ #include <asm/uaccess.h>
+ #include <asm/lowcore.h>
+@@ -53,8 +54,7 @@
+ /*
+  * Atomically swap in the new signal mask, and wait for a signal.
+  */
+-asmlinkage int
+-sys_sigsuspend(int history0, int history1, old_sigset_t mask)
++SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask)
+ {
+       mask &= _BLOCKABLE;
+       spin_lock_irq(&current->sighand->siglock);
+@@ -70,9 +70,8 @@
+       return -ERESTARTNOHAND;
+ }
+-asmlinkage long
+-sys_sigaction(int sig, const struct old_sigaction __user *act,
+-            struct old_sigaction __user *oact)
++SYSCALL_DEFINE3(sigaction, int, sig, const struct old_sigaction __user *, act,
++              struct old_sigaction __user *, oact)
+ {
+       struct k_sigaction new_ka, old_ka;
+       int ret;
+@@ -102,15 +101,13 @@
+       return ret;
+ }
+-asmlinkage long
+-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
++SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss,
++              stack_t __user *, uoss)
+ {
+       struct pt_regs *regs = task_pt_regs(current);
+       return do_sigaltstack(uss, uoss, regs->gprs[15]);
+ }
+-
+-
+ /* Returns non-zero on fault. */
+ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
+ {
+@@ -164,7 +161,7 @@
+       return 0;
+ }
+-asmlinkage long sys_sigreturn(void)
++SYSCALL_DEFINE0(sigreturn)
+ {
+       struct pt_regs *regs = task_pt_regs(current);
+       sigframe __user *frame = (sigframe __user *)regs->gprs[15];
+@@ -191,7 +188,7 @@
+       return 0;
+ }
+-asmlinkage long sys_rt_sigreturn(void)
++SYSCALL_DEFINE0(rt_sigreturn)
+ {
+       struct pt_regs *regs = task_pt_regs(current);
+       rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15];
+--- kernel-maemo-2.6.28.test.orig/arch/s390/kernel/sys_s390.c
++++ kernel-maemo-2.6.28.test/arch/s390/kernel/sys_s390.c
+@@ -29,6 +29,7 @@
+ #include <linux/personality.h>
+ #include <linux/unistd.h>
+ #include <linux/ipc.h>
++#include <linux/syscalls.h>
+ #include <asm/uaccess.h>
+ #include "entry.h"
+@@ -74,7 +75,7 @@
+       unsigned long offset;
+ };
+-asmlinkage long sys_mmap2(struct mmap_arg_struct __user  *arg)
++SYSCALL_DEFINE1(mmap2, struct mmap_arg_struct __user *, arg)
+ {
+       struct mmap_arg_struct a;
+       int error = -EFAULT;
+@@ -86,7 +87,7 @@
+       return error;
+ }
+-asmlinkage long old_mmap(struct mmap_arg_struct __user *arg)
++SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct __user *, arg)
+ {
+       struct mmap_arg_struct a;
+       long error = -EFAULT;
+@@ -127,8 +128,8 @@
+  *
+  * This is really horribly ugly.
+  */
+-asmlinkage long sys_ipc(uint call, int first, unsigned long second,
+-                                unsigned long third, void __user *ptr)
++SYSCALL_DEFINE5(ipc, uint, call, int, first, unsigned long, second,
++              unsigned long, third, void __user *, ptr)
+ {
+         struct ipc_kludge tmp;
+       int ret;
+@@ -194,7 +195,7 @@
+ }
+ #ifdef CONFIG_64BIT
+-asmlinkage long s390x_newuname(struct new_utsname __user *name)
++SYSCALL_DEFINE1(s390_newuname, struct new_utsname __user *, name)
+ {
+       int ret = sys_newuname(name);
+@@ -205,7 +206,7 @@
+       return ret;
+ }
+-asmlinkage long s390x_personality(unsigned long personality)
++SYSCALL_DEFINE1(s390_personality, unsigned long, personality)
+ {
+       int ret;
+@@ -224,15 +225,13 @@
+  */
+ #ifndef CONFIG_64BIT
+-asmlinkage long
+-s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice)
++SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, offset_high, u32, offset_low,
++              size_t, len, int, advice)
+ {
+       return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low,
+                       len, advice);
+ }
+-#endif
+-
+ struct fadvise64_64_args {
+       int fd;
+       long long offset;
+@@ -240,8 +239,7 @@
+       int advice;
+ };
+-asmlinkage long
+-s390_fadvise64_64(struct fadvise64_64_args __user *args)
++SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
+ {
+       struct fadvise64_64_args a;
+@@ -250,7 +248,6 @@
+       return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
+ }
+-#ifndef CONFIG_64BIT
+ /*
+  * This is a wrapper to call sys_fallocate(). For 31 bit s390 the last
+  * 64 bit argument "len" is split into the upper and lower 32 bits. The
+@@ -263,9 +260,19 @@
+  * to
+  *   %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len
+  */
+-asmlinkage long s390_fallocate(int fd, int mode, loff_t offset,
++SYSCALL_DEFINE(s390_fallocate)(int fd, int mode, loff_t offset,
+                              u32 len_high, u32 len_low)
+ {
+       return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low);
+ }
++#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
++asmlinkage long SyS_s390_fallocate(long fd, long mode, loff_t offset,
++                                 long len_high, long len_low)
++{
++      return SYSC_s390_fallocate((int) fd, (int) mode, offset,
++                                 (u32) len_high, (u32) len_low);
++}
++SYSCALL_ALIAS(sys_s390_fallocate, SyS_s390_fallocate);
++#endif
++
+ #endif
+--- kernel-maemo-2.6.28.test.orig/arch/s390/kernel/syscalls.S
++++ kernel-maemo-2.6.28.test/arch/s390/kernel/syscalls.S
+@@ -98,7 +98,7 @@
+ SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper)
+ SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper)
+ SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper)  /* old readdir syscall */
+-SYSCALL(old_mmap,old_mmap,old32_mmap_wrapper)                 /* 90 */
++SYSCALL(sys_s390_old_mmap,sys_s390_old_mmap,old32_mmap_wrapper)       /* 90 */
+ SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper)
+ SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper)
+ SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper)
+@@ -130,7 +130,7 @@
+ SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn)
+ SYSCALL(sys_clone,sys_clone,sys32_clone)                      /* 120 */
+ SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
+-SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper)
++SYSCALL(sys_newuname,sys_s390_newuname,sys32_newuname_wrapper)
+ NI_SYSCALL                                                    /* modify_ldt for i386 */
+ SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper)
+ SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper)     /* 125 */
+@@ -144,7 +144,7 @@
+ SYSCALL(sys_fchdir,sys_fchdir,sys32_fchdir_wrapper)
+ SYSCALL(sys_bdflush,sys_bdflush,sys32_bdflush_wrapper)
+ SYSCALL(sys_sysfs,sys_sysfs,sys32_sysfs_wrapper)              /* 135 */
+-SYSCALL(sys_personality,s390x_personality,sys32_personality_wrapper)
++SYSCALL(sys_personality,sys_s390_personality,sys32_personality_wrapper)
+ NI_SYSCALL                                                    /* for afs_syscall */
+ SYSCALL(sys_setfsuid16,sys_ni_syscall,sys32_setfsuid16_wrapper)       /* old setfsuid16 syscall */
+ SYSCALL(sys_setfsgid16,sys_ni_syscall,sys32_setfsgid16_wrapper)       /* old setfsgid16 syscall */
+@@ -261,7 +261,7 @@
+ SYSCALL(sys_epoll_ctl,sys_epoll_ctl,sys_epoll_ctl_wrapper)    /* 250 */
+ SYSCALL(sys_epoll_wait,sys_epoll_wait,sys_epoll_wait_wrapper)
+ SYSCALL(sys_set_tid_address,sys_set_tid_address,sys32_set_tid_address_wrapper)
+-SYSCALL(s390_fadvise64,sys_fadvise64_64,sys32_fadvise64_wrapper)
++SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,sys32_fadvise64_wrapper)
+ SYSCALL(sys_timer_create,sys_timer_create,sys32_timer_create_wrapper)
+ SYSCALL(sys_timer_settime,sys_timer_settime,sys32_timer_settime_wrapper)      /* 255 */
+ SYSCALL(sys_timer_gettime,sys_timer_gettime,sys32_timer_gettime_wrapper)
+@@ -272,7 +272,7 @@
+ SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
+ SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
+ NI_SYSCALL                                                    /* reserved for vserver */
+-SYSCALL(s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
++SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
+ SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
+ SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
+ SYSCALL(sys_remap_file_pages,sys_remap_file_pages,sys32_remap_file_pages_wrapper)
+@@ -322,7 +322,7 @@
+ SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper)
+ SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait_wrapper)
+ SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes_wrapper)
+-SYSCALL(s390_fallocate,sys_fallocate,sys_fallocate_wrapper)
++SYSCALL(sys_s390_fallocate,sys_fallocate,sys_fallocate_wrapper)
+ SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat_wrapper)     /* 315 */
+ SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd_wrapper)
+ NI_SYSCALL                                            /* 317 old sys_timer_fd */
+--- kernel-maemo-2.6.28.test.orig/arch/s390/lib/div64.c
++++ kernel-maemo-2.6.28.test/arch/s390/lib/div64.c
+@@ -61,7 +61,7 @@
+               "       clr     %0,%3\n"
+               "       jl      0f\n"
+               "       slr     %0,%3\n"
+-              "       alr     %1,%2\n"
++              "       ahi     %1,1\n"
+               "0:\n"
+               : "+d" (reg2), "+d" (reg3), "=d" (tmp)
+               : "d" (base), "2" (1UL) : "cc" );
+--- kernel-maemo-2.6.28.test.orig/arch/sh/include/asm/syscalls_32.h
++++ kernel-maemo-2.6.28.test/arch/sh/include/asm/syscalls_32.h
+@@ -36,9 +36,9 @@
+ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
+                               unsigned long r6, unsigned long r7,
+                               struct pt_regs __regs);
+-asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
+-                      unsigned long r6, unsigned long r7,
+-                      struct pt_regs __regs);
++asmlinkage int sys_sh_pipe(unsigned long r4, unsigned long r5,
++                         unsigned long r6, unsigned long r7,
++                         struct pt_regs __regs);
+ asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf,
+                                    size_t count, long dummy, loff_t pos);
+ asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf,
+--- kernel-maemo-2.6.28.test.orig/arch/sh/kernel/sys_sh32.c
++++ kernel-maemo-2.6.28.test/arch/sh/kernel/sys_sh32.c
+@@ -22,7 +22,7 @@
+  * sys_pipe() is the normal C calling standard for creating
+  * a pipe. It's not the way Unix traditionally does this, though.
+  */
+-asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
++asmlinkage int sys_sh_pipe(unsigned long r4, unsigned long r5,
+       unsigned long r6, unsigned long r7,
+       struct pt_regs __regs)
+ {
+--- kernel-maemo-2.6.28.test.orig/arch/sh/kernel/syscalls_32.S
++++ kernel-maemo-2.6.28.test/arch/sh/kernel/syscalls_32.S
+@@ -58,7 +58,7 @@
+       .long sys_mkdir
+       .long sys_rmdir         /* 40 */
+       .long sys_dup
+-      .long sys_pipe
++      .long sys_sh_pipe
+       .long sys_times
+       .long sys_ni_syscall    /* old prof syscall holder */
+       .long sys_brk           /* 45 */
+@@ -105,7 +105,7 @@
+       .long sys_uselib
+       .long sys_swapon
+       .long sys_reboot
+-      .long old_readdir
++      .long sys_old_readdir
+       .long old_mmap          /* 90 */
+       .long sys_munmap
+       .long sys_truncate
+--- kernel-maemo-2.6.28.test.orig/arch/sh/kernel/syscalls_64.S
++++ kernel-maemo-2.6.28.test/arch/sh/kernel/syscalls_64.S
+@@ -109,7 +109,7 @@
+       .long sys_uselib
+       .long sys_swapon
+       .long sys_reboot
+-      .long old_readdir
++      .long sys_old_readdir
+       .long old_mmap                  /* 90 */
+       .long sys_munmap
+       .long sys_truncate
+--- kernel-maemo-2.6.28.test.orig/arch/sparc/include/asm/compat.h
++++ kernel-maemo-2.6.28.test/arch/sparc/include/asm/compat.h
+@@ -240,4 +240,9 @@
+       unsigned int    __unused2;
+ };
++static inline int is_compat_task(void)
++{
++      return test_thread_flag(TIF_32BIT);
++}
++
+ #endif /* _ASM_SPARC64_COMPAT_H */
+--- kernel-maemo-2.6.28.test.orig/arch/sparc/include/asm/seccomp.h
++++ kernel-maemo-2.6.28.test/arch/sparc/include/asm/seccomp.h
+@@ -1,11 +1,5 @@
+ #ifndef _ASM_SECCOMP_H
+-#include <linux/thread_info.h> /* already defines TIF_32BIT */
+-
+-#ifndef TIF_32BIT
+-#error "unexpected TIF_32BIT on sparc64"
+-#endif
+-
+ #include <linux/unistd.h>
+ #define __NR_seccomp_read __NR_read
+--- kernel-maemo-2.6.28.test.orig/arch/sparc/kernel/entry.S
++++ kernel-maemo-2.6.28.test/arch/sparc/kernel/entry.S
+@@ -1088,8 +1088,8 @@
+        ld     [%sp + STACKFRAME_SZ + PT_I0], %o0
+       .align  4
+-      .globl  sys_pipe
+-sys_pipe:
++      .globl  sys_sparc_pipe
++sys_sparc_pipe:
+       mov     %o7, %l5
+       add     %sp, STACKFRAME_SZ, %o0         ! pt_regs *regs arg
+       call    sparc_pipe
+--- kernel-maemo-2.6.28.test.orig/arch/sparc/kernel/systbls.S
++++ kernel-maemo-2.6.28.test/arch/sparc/kernel/systbls.S
+@@ -24,7 +24,7 @@
+ /*25*/        .long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause
+ /*30*/        .long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
+ /*35*/        .long sys_chown, sys_sync, sys_kill, sys_newstat, sys_sendfile
+-/*40*/        .long sys_newlstat, sys_dup, sys_pipe, sys_times, sys_getuid
++/*40*/        .long sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_getuid
+ /*45*/        .long sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16
+ /*50*/        .long sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, sys_ioctl
+ /*55*/        .long sys_reboot, sys_mmap2, sys_symlink, sys_readlink, sys_execve
+@@ -56,7 +56,7 @@
+ /*185*/       .long sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_newuname
+ /*190*/       .long sys_init_module, sys_personality, sparc_remap_file_pages, sys_epoll_create, sys_epoll_ctl
+ /*195*/       .long sys_epoll_wait, sys_ioprio_set, sys_getppid, sparc_sigaction, sys_sgetmask
+-/*200*/       .long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, old_readdir
++/*200*/       .long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, sys_old_readdir
+ /*205*/       .long sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
+ /*210*/       .long sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo
+ /*215*/       .long sys_ipc, sys_sigreturn, sys_clone, sys_ioprio_get, sys_adjtimex
+--- kernel-maemo-2.6.28.test.orig/arch/sparc64/Kconfig
++++ kernel-maemo-2.6.28.test/arch/sparc64/Kconfig
+@@ -14,6 +14,7 @@
+       select HAVE_FUNCTION_TRACER
+       select HAVE_IDE
+       select HAVE_LMB
++      select HAVE_SYSCALL_WRAPPERS
+       select HAVE_ARCH_KGDB
+       select USE_GENERIC_SMP_HELPERS if SMP
+       select HAVE_ARCH_TRACEHOOK
+--- kernel-maemo-2.6.28.test.orig/arch/sparc64/kernel/chmc.c
++++ kernel-maemo-2.6.28.test/arch/sparc64/kernel/chmc.c
+@@ -306,6 +306,7 @@
+               buf[1] = '?';
+               buf[2] = '?';
+               buf[3] = '\0';
++              return 0;
+       }
+       p = dp->controller;
+       prop = &p->layout;
+--- kernel-maemo-2.6.28.test.orig/arch/sparc64/kernel/sys_sparc.c
++++ kernel-maemo-2.6.28.test/arch/sparc64/kernel/sys_sparc.c
+@@ -397,7 +397,7 @@
+       }
+ }
+-asmlinkage unsigned long sparc_brk(unsigned long brk)
++SYSCALL_DEFINE1(sparc_brk, unsigned long, brk)
+ {
+       /* People could try to be nasty and use ta 0x6d in 32bit programs */
+       if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
+@@ -413,7 +413,7 @@
+  * sys_pipe() is the normal C calling standard for creating
+  * a pipe. It's not the way unix traditionally does this, though.
+  */
+-asmlinkage long sparc_pipe(struct pt_regs *regs)
++SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
+ {
+       int fd[2];
+       int error;
+@@ -433,8 +433,8 @@
+  * This is really horribly ugly.
+  */
+-asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
+-                      unsigned long third, void __user *ptr, long fifth)
++SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
++              unsigned long, third, void __user *, ptr, long, fifth)
+ {
+       long err;
+@@ -517,7 +517,7 @@
+       return err;
+ }
+-asmlinkage long sparc64_newuname(struct new_utsname __user *name)
++SYSCALL_DEFINE1(sparc64_newuname, struct new_utsname __user *, name)
+ {
+       int ret = sys_newuname(name);
+       
+@@ -528,7 +528,7 @@
+       return ret;
+ }
+-asmlinkage long sparc64_personality(unsigned long personality)
++SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
+ {
+       int ret;
+@@ -562,9 +562,9 @@
+ }
+ /* Linux version of mmap */
+-asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
+-      unsigned long prot, unsigned long flags, unsigned long fd,
+-      unsigned long off)
++SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
++              unsigned long, prot, unsigned long, flags, unsigned long, fd,
++              unsigned long, off)
+ {
+       struct file * file = NULL;
+       unsigned long retval = -EBADF;
+@@ -587,7 +587,7 @@
+       return retval;
+ }
+-asmlinkage long sys64_munmap(unsigned long addr, size_t len)
++SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
+ {
+       long ret;
+@@ -604,9 +604,9 @@
+       unsigned long old_len, unsigned long new_len,
+       unsigned long flags, unsigned long new_addr);
+                 
+-asmlinkage unsigned long sys64_mremap(unsigned long addr,
+-      unsigned long old_len, unsigned long new_len,
+-      unsigned long flags, unsigned long new_addr)
++SYSCALL_DEFINE5(64_mremap, unsigned long, addr,       unsigned long, old_len,
++              unsigned long, new_len, unsigned long, flags,
++              unsigned long, new_addr)
+ {
+       unsigned long ret = -EINVAL;
+@@ -669,7 +669,7 @@
+ extern void check_pending(int signum);
+-asmlinkage long sys_getdomainname(char __user *name, int len)
++SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
+ {
+         int nlen, err;
+@@ -692,11 +692,10 @@
+       return err;
+ }
+-asmlinkage long sys_utrap_install(utrap_entry_t type,
+-                                utrap_handler_t new_p,
+-                                utrap_handler_t new_d,
+-                                utrap_handler_t __user *old_p,
+-                                utrap_handler_t __user *old_d)
++SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
++              utrap_handler_t, new_p, utrap_handler_t, new_d,
++              utrap_handler_t __user *, old_p,
++              utrap_handler_t __user *, old_d)
+ {
+       if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
+               return -EINVAL;
+@@ -762,11 +761,9 @@
+       return 0;
+ }
+-asmlinkage long sys_rt_sigaction(int sig,
+-                               const struct sigaction __user *act,
+-                               struct sigaction __user *oact,
+-                               void __user *restorer,
+-                               size_t sigsetsize)
++SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
++              struct sigaction __user *, oact, void __user *, restorer,
++              size_t, sigsetsize)
+ {
+       struct k_sigaction new_ka, old_ka;
+       int ret;
+@@ -806,7 +803,8 @@
+       reset_pic();
+ }
+-asmlinkage long sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long arg2)
++SYSCALL_DEFINE4(perfctr, int, opcode, unsigned long, arg0,
++              unsigned long, arg1, unsigned long, arg2)
+ {
+       int err = 0;
+--- kernel-maemo-2.6.28.test.orig/arch/sparc64/kernel/syscalls.S
++++ kernel-maemo-2.6.28.test/arch/sparc64/kernel/syscalls.S
+@@ -20,8 +20,8 @@
+        add    %sp, PTREGS_OFF, %o0
+       .align  32
+-sys_pipe:
+-      ba,pt   %xcc, sparc_pipe
++sys_sparc_pipe:
++      ba,pt   %xcc, sys_sparc_pipe_real
+        add    %sp, PTREGS_OFF, %o0
+ sys_nis_syscall:
+       ba,pt   %xcc, c_sys_nis_syscall
+--- kernel-maemo-2.6.28.test.orig/arch/sparc64/kernel/systbls.S
++++ kernel-maemo-2.6.28.test/arch/sparc64/kernel/systbls.S
+@@ -21,12 +21,12 @@
+ /*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write
+ /*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link
+ /*10*/  .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod
+-/*15*/        .word sys_chmod, sys_lchown16, sparc_brk, sys32_perfctr, sys32_lseek
++/*15*/        .word sys_chmod, sys_lchown16, sys_sparc_brk, sys32_perfctr, sys32_lseek
+ /*20*/        .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
+ /*25*/        .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause
+ /*30*/        .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
+       .word sys_chown, sys_sync, sys32_kill, compat_sys_newstat, sys32_sendfile
+-/*40*/        .word compat_sys_newlstat, sys_dup, sys_pipe, compat_sys_times, sys_getuid
++/*40*/        .word compat_sys_newlstat, sys_dup, sys_sparc_pipe, compat_sys_times, sys_getuid
+       .word sys32_umount, sys_setgid16, sys_getgid16, sys32_signal, sys_geteuid16
+ /*50*/        .word sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl
+       .word sys32_reboot, sys32_mmap2, sys_symlink, sys32_readlink, sys32_execve
+@@ -55,8 +55,8 @@
+ /*170*/       .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
+       .word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
+ /*180*/       .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
+-      .word sys32_setpgid, sys32_fremovexattr, sys32_tkill, sys32_exit_group, sparc64_newuname
+-/*190*/       .word sys32_init_module, sparc64_personality, sys_remap_file_pages, sys32_epoll_create, sys32_epoll_ctl
++      .word sys32_setpgid, sys32_fremovexattr, sys32_tkill, sys32_exit_group, sys_sparc64_newuname
++/*190*/       .word sys32_init_module, sys_sparc64_personality, sys_remap_file_pages, sys32_epoll_create, sys32_epoll_ctl
+       .word sys32_epoll_wait, sys32_ioprio_set, sys_getppid, sys32_sigaction, sys_sgetmask
+ /*200*/       .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir
+       .word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64
+@@ -95,18 +95,18 @@
+ /*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
+ /*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link
+ /*10*/  .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
+-/*15*/        .word sys_chmod, sys_lchown, sparc_brk, sys_perfctr, sys_lseek
++/*15*/        .word sys_chmod, sys_lchown, sys_sparc_brk, sys_perfctr, sys_lseek
+ /*20*/        .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
+ /*25*/        .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
+ /*30*/        .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
+       .word sys_nis_syscall, sys_sync, sys_kill, sys_newstat, sys_sendfile64
+-/*40*/        .word sys_newlstat, sys_dup, sys_pipe, sys_times, sys_nis_syscall
++/*40*/        .word sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_nis_syscall
+       .word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid
+ /*50*/        .word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl
+       .word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys_execve
+ /*60*/        .word sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize
+       .word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall
+-/*70*/        .word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys64_munmap, sys_mprotect
++/*70*/        .word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys_64_munmap, sys_mprotect
+       .word sys_madvise, sys_vhangup, sys_nis_syscall, sys_mincore, sys_getgroups
+ /*80*/        .word sys_setgroups, sys_getpgrp, sys_nis_syscall, sys_setitimer, sys_nis_syscall
+       .word sys_swapon, sys_getitimer, sys_nis_syscall, sys_sethostname, sys_nis_syscall
+@@ -129,8 +129,8 @@
+ /*170*/       .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
+       .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
+ /*180*/       .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall
+-      .word sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sparc64_newuname
+-/*190*/       .word sys_init_module, sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
++      .word sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_sparc64_newuname
++/*190*/       .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
+       .word sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_nis_syscall, sys_sgetmask
+ /*200*/       .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall
+       .word sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
+@@ -142,7 +142,7 @@
+       .word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
+ /*240*/       .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
+       .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
+-/*250*/       .word sys64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
++/*250*/       .word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
+       .word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
+ /*260*/       .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
+       .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
+--- kernel-maemo-2.6.28.test.orig/arch/sparc64/kernel/systbls.h
++++ kernel-maemo-2.6.28.test/arch/sparc64/kernel/systbls.h
+@@ -16,9 +16,6 @@
+                              void __user *ptr, long fifth);
+ extern asmlinkage long sparc64_newuname(struct new_utsname __user *name);
+ extern asmlinkage long sparc64_personality(unsigned long personality);
+-extern asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
+-                                       unsigned long prot, unsigned long flags,
+-                                       unsigned long fd, unsigned long off);
+ extern asmlinkage long sys64_munmap(unsigned long addr, size_t len);
+ extern asmlinkage unsigned long sys64_mremap(unsigned long addr,
+                                            unsigned long old_len,
+--- kernel-maemo-2.6.28.test.orig/arch/sparc64/kernel/traps.c
++++ kernel-maemo-2.6.28.test/arch/sparc64/kernel/traps.c
+@@ -1,6 +1,6 @@
+ /* arch/sparc64/kernel/traps.c
+  *
+- * Copyright (C) 1995,1997,2008 David S. Miller (davem@davemloft.net)
++ * Copyright (C) 1995,1997,2008,2009 David S. Miller (davem@davemloft.net)
+  * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
+  */
+@@ -313,6 +313,21 @@
+               return;
+       if (regs->tstate & TSTATE_PRIV) {
++              /* Test if this comes from uaccess places. */
++              const struct exception_table_entry *entry;
++
++              entry = search_exception_tables(regs->tpc);
++              if (entry) {
++                      /* Ouch, somebody is trying VM hole tricks on us... */
++#ifdef DEBUG_EXCEPTIONS
++                      printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
++                      printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
++                             regs->tpc, entry->fixup);
++#endif
++                      regs->tpc = entry->fixup;
++                      regs->tnpc = regs->tpc + 4;
++                      return;
++              }
+               printk("sun4v_data_access_exception: ADDR[%016lx] "
+                      "CTX[%04x] TYPE[%04x], going.\n",
+                      addr, ctx, type);
+--- kernel-maemo-2.6.28.test.orig/arch/x86/Kconfig
++++ kernel-maemo-2.6.28.test/arch/x86/Kconfig
+@@ -569,7 +569,7 @@
+ # need this always selected by IOMMU for the VIA workaround
+ config SWIOTLB
+-      bool
++      def_bool y if X86_64
+       help
+         Support for software bounce buffers used on x86-64 systems
+         which don't have a hardware IOMMU (e.g. the current generation
+--- kernel-maemo-2.6.28.test.orig/arch/x86/boot/memory.c
++++ kernel-maemo-2.6.28.test/arch/x86/boot/memory.c
+@@ -27,13 +27,14 @@
+       do {
+               size = sizeof(struct e820entry);
+-              /* Important: %edx is clobbered by some BIOSes,
+-                 so it must be either used for the error output
++              /* Important: %edx and %esi are clobbered by some BIOSes,
++                 so they must be either used for the error output
+                  or explicitly marked clobbered. */
+               asm("int $0x15; setc %0"
+                   : "=d" (err), "+b" (next), "=a" (id), "+c" (size),
+                     "=m" (*desc)
+-                  : "D" (desc), "d" (SMAP), "a" (0xe820));
++                  : "D" (desc), "d" (SMAP), "a" (0xe820)
++                  : "esi");
+               /* BIOSes which terminate the chain with CF = 1 as opposed
+                  to %ebx = 0 don't always report the SMAP signature on
+--- kernel-maemo-2.6.28.test.orig/arch/x86/ia32/ia32entry.S
++++ kernel-maemo-2.6.28.test/arch/x86/ia32/ia32entry.S
+@@ -418,9 +418,9 @@
+       orl   $TS_COMPAT,TI_status(%r10)
+       testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+       jnz ia32_tracesys
+-ia32_do_syscall:      
+       cmpl $(IA32_NR_syscalls-1),%eax
+-      ja  int_ret_from_sys_call       /* ia32_tracesys has set RAX(%rsp) */
++      ja ia32_badsys
++ia32_do_call:
+       IA32_ARG_FIXUP
+       call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
+ ia32_sysret:
+@@ -435,7 +435,9 @@
+       call syscall_trace_enter
+       LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
+       RESTORE_REST
+-      jmp ia32_do_syscall
++      cmpl $(IA32_NR_syscalls-1),%eax
++      ja  int_ret_from_sys_call       /* ia32_tracesys has set RAX(%rsp) */
++      jmp ia32_do_call
+ END(ia32_syscall)
+ ia32_badsys:
+--- kernel-maemo-2.6.28.test.orig/arch/x86/include/asm/kvm_host.h
++++ kernel-maemo-2.6.28.test/arch/x86/include/asm/kvm_host.h
+@@ -190,9 +190,11 @@
+       u64 *spt;
+       /* hold the gfn of each spte inside spt */
+       gfn_t *gfns;
+-      unsigned long slot_bitmap; /* One bit set per slot which has memory
+-                                  * in this shadow page.
+-                                  */
++      /*
++       * One bit set per slot which has memory
++       * in this shadow page.
++       */
++      DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
+       int multimapped;         /* More than one parent_pte? */
+       int root_count;          /* Currently serving as active root */
+       bool unsync;
+@@ -607,6 +609,8 @@
+ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
+ int complete_pio(struct kvm_vcpu *vcpu);
++struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
++
+ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
+ {
+       struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
+--- kernel-maemo-2.6.28.test.orig/arch/x86/include/asm/math_emu.h
++++ kernel-maemo-2.6.28.test/arch/x86/include/asm/math_emu.h
+@@ -1,31 +1,18 @@
+ #ifndef _ASM_X86_MATH_EMU_H
+ #define _ASM_X86_MATH_EMU_H
++#include <asm/ptrace.h>
++#include <asm/vm86.h>
++
+ /* This structure matches the layout of the data saved to the stack
+    following a device-not-present interrupt, part of it saved
+    automatically by the 80386/80486.
+    */
+-struct info {
++struct math_emu_info {
+       long ___orig_eip;
+-      long ___ebx;
+-      long ___ecx;
+-      long ___edx;
+-      long ___esi;
+-      long ___edi;
+-      long ___ebp;
+-      long ___eax;
+-      long ___ds;
+-      long ___es;
+-      long ___fs;
+-      long ___orig_eax;
+-      long ___eip;
+-      long ___cs;
+-      long ___eflags;
+-      long ___esp;
+-      long ___ss;
+-      long ___vm86_es; /* This and the following only in vm86 mode */
+-      long ___vm86_ds;
+-      long ___vm86_fs;
+-      long ___vm86_gs;
++      union {
++              struct pt_regs *regs;
++              struct kernel_vm86_regs *vm86;
++      };
+ };
+ #endif /* _ASM_X86_MATH_EMU_H */
+--- kernel-maemo-2.6.28.test.orig/arch/x86/include/asm/mmzone_32.h
++++ kernel-maemo-2.6.28.test/arch/x86/include/asm/mmzone_32.h
+@@ -32,8 +32,6 @@
+       get_memcfg_numa_flat();
+ }
+-extern int early_pfn_to_nid(unsigned long pfn);
+-
+ extern void resume_map_numa_kva(pgd_t *pgd);
+ #else /* !CONFIG_NUMA */
+--- kernel-maemo-2.6.28.test.orig/arch/x86/include/asm/mmzone_64.h
++++ kernel-maemo-2.6.28.test/arch/x86/include/asm/mmzone_64.h
+@@ -40,8 +40,6 @@
+ #define node_end_pfn(nid)       (NODE_DATA(nid)->node_start_pfn +     \
+                                NODE_DATA(nid)->node_spanned_pages)
+-extern int early_pfn_to_nid(unsigned long pfn);
+-
+ #ifdef CONFIG_NUMA_EMU
+ #define FAKE_NODE_MIN_SIZE    (64 * 1024 * 1024)
+ #define FAKE_NODE_MIN_HASH_MASK       (~(FAKE_NODE_MIN_SIZE - 1UL))
+--- kernel-maemo-2.6.28.test.orig/arch/x86/include/asm/msr-index.h
++++ kernel-maemo-2.6.28.test/arch/x86/include/asm/msr-index.h
+@@ -200,6 +200,35 @@
+ #define MSR_IA32_THERM_STATUS         0x0000019c
+ #define MSR_IA32_MISC_ENABLE          0x000001a0
++/* MISC_ENABLE bits: architectural */
++#define MSR_IA32_MISC_ENABLE_FAST_STRING      (1ULL << 0)
++#define MSR_IA32_MISC_ENABLE_TCC              (1ULL << 1)
++#define MSR_IA32_MISC_ENABLE_EMON             (1ULL << 7)
++#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL      (1ULL << 11)
++#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL     (1ULL << 12)
++#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP       (1ULL << 16)
++#define MSR_IA32_MISC_ENABLE_MWAIT            (1ULL << 18)
++#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID      (1ULL << 22)
++#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE     (1ULL << 23)
++#define MSR_IA32_MISC_ENABLE_XD_DISABLE               (1ULL << 34)
++
++/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
++#define MSR_IA32_MISC_ENABLE_X87_COMPAT               (1ULL << 2)
++#define MSR_IA32_MISC_ENABLE_TM1              (1ULL << 3)
++#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE       (1ULL << 4)
++#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE  (1ULL << 6)
++#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK    (1ULL << 8)
++#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << 9)
++#define MSR_IA32_MISC_ENABLE_FERR             (1ULL << 10)
++#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX   (1ULL << 10)
++#define MSR_IA32_MISC_ENABLE_TM2              (1ULL << 13)
++#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << 19)
++#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK   (1ULL << 20)
++#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT      (1ULL << 24)
++#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << 37)
++#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE    (1ULL << 38)
++#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE  (1ULL << 39)
++
+ /* Intel Model 6 */
+ #define MSR_P6_EVNTSEL0                       0x00000186
+ #define MSR_P6_EVNTSEL1                       0x00000187
+--- kernel-maemo-2.6.28.test.orig/arch/x86/include/asm/paravirt.h
++++ kernel-maemo-2.6.28.test/arch/x86/include/asm/paravirt.h
+@@ -1352,14 +1352,7 @@
+       PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
+ }
+-static inline void arch_flush_lazy_cpu_mode(void)
+-{
+-      if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
+-              arch_leave_lazy_cpu_mode();
+-              arch_enter_lazy_cpu_mode();
+-      }
+-}
+-
++void arch_flush_lazy_cpu_mode(void);
+ #define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+ static inline void arch_enter_lazy_mmu_mode(void)
+@@ -1372,13 +1365,7 @@
+       PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
+ }
+-static inline void arch_flush_lazy_mmu_mode(void)
+-{
+-      if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
+-              arch_leave_lazy_mmu_mode();
+-              arch_enter_lazy_mmu_mode();
+-      }
+-}
++void arch_flush_lazy_mmu_mode(void);
+ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+                               unsigned long phys, pgprot_t flags)
+--- kernel-maemo-2.6.28.test.orig/arch/x86/include/asm/pgalloc.h
++++ kernel-maemo-2.6.28.test/arch/x86/include/asm/pgalloc.h
+@@ -42,6 +42,7 @@
+ static inline void pte_free(struct mm_struct *mm, struct page *pte)
+ {
++      pgtable_page_dtor(pte);
+       __free_page(pte);
+ }
+--- kernel-maemo-2.6.28.test.orig/arch/x86/include/asm/processor.h
++++ kernel-maemo-2.6.28.test/arch/x86/include/asm/processor.h
+@@ -349,7 +349,7 @@
+       u8                      no_update;
+       u8                      rm;
+       u8                      alimit;
+-      struct info             *info;
++      struct math_emu_info    *info;
+       u32                     entry_eip;
+ };
+--- kernel-maemo-2.6.28.test.orig/arch/x86/include/asm/seccomp_32.h
++++ kernel-maemo-2.6.28.test/arch/x86/include/asm/seccomp_32.h
+@@ -1,12 +1,6 @@
+ #ifndef _ASM_X86_SECCOMP_32_H
+ #define _ASM_X86_SECCOMP_32_H
+-#include <linux/thread_info.h>
+-
+-#ifdef TIF_32BIT
+-#error "unexpected TIF_32BIT on i386"
+-#endif
+-
+ #include <linux/unistd.h>
+ #define __NR_seccomp_read __NR_read
+--- kernel-maemo-2.6.28.test.orig/arch/x86/include/asm/seccomp_64.h
++++ kernel-maemo-2.6.28.test/arch/x86/include/asm/seccomp_64.h
+@@ -1,14 +1,6 @@
+ #ifndef _ASM_X86_SECCOMP_64_H
+ #define _ASM_X86_SECCOMP_64_H
+-#include <linux/thread_info.h>
+-
+-#ifdef TIF_32BIT
+-#error "unexpected TIF_32BIT on x86_64"
+-#else
+-#define TIF_32BIT TIF_IA32
+-#endif
+-
+ #include <linux/unistd.h>
+ #include <asm/ia32_unistd.h>
+--- kernel-maemo-2.6.28.test.orig/arch/x86/include/asm/traps.h
++++ kernel-maemo-2.6.28.test/arch/x86/include/asm/traps.h
+@@ -41,7 +41,7 @@
+ dotraplinkage void do_overflow(struct pt_regs *, long);
+ dotraplinkage void do_bounds(struct pt_regs *, long);
+ dotraplinkage void do_invalid_op(struct pt_regs *, long);
+-dotraplinkage void do_device_not_available(struct pt_regs *, long);
++dotraplinkage void do_device_not_available(struct pt_regs);
+ dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
+ dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
+ dotraplinkage void do_segment_not_present(struct pt_regs *, long);
+@@ -74,8 +74,8 @@
+ #ifdef CONFIG_X86_32
+ void math_error(void __user *);
++void math_emulate(struct math_emu_info *);
+ unsigned long patch_espfix_desc(unsigned long, unsigned long);
+-asmlinkage void math_emulate(long);
+ #endif
+ #endif /* _ASM_X86_TRAPS_H */
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/acpi/cstate.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/acpi/cstate.c
+@@ -56,6 +56,7 @@
+ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
+ #define MWAIT_SUBSTATE_MASK   (0xf)
++#define MWAIT_CSTATE_MASK     (0xf)
+ #define MWAIT_SUBSTATE_SIZE   (4)
+ #define CPUID_MWAIT_LEAF (5)
+@@ -98,7 +99,8 @@
+       cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+       /* Check whether this particular cx_type (in CST) is supported or not */
+-      cstate_type = (cx->address >> MWAIT_SUBSTATE_SIZE) + 1;
++      cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) &
++                      MWAIT_CSTATE_MASK) + 1;
+       edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
+       num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/apic.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/apic.c
+@@ -1451,7 +1451,7 @@
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_AMD:
+               if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
+-                  (boot_cpu_data.x86 == 15))
++                  (boot_cpu_data.x86 >= 15))
+                       break;
+               goto no_apic;
+       case X86_VENDOR_INTEL:
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/cpu/addon_cpuid_features.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/cpu/addon_cpuid_features.c
+@@ -120,9 +120,17 @@
+       c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
+                                                & core_select_mask;
+       c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
++      /*
++       * Reinit the apicid, now that we have extended initial_apicid.
++       */
++      c->apicid = phys_pkg_id(c->initial_apicid, 0);
+ #else
+       c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
+       c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
++      /*
++       * Reinit the apicid, now that we have extended initial_apicid.
++       */
++      c->apicid = phys_pkg_id(0);
+ #endif
+       c->x86_max_cores = (core_level_siblings / smp_num_siblings);
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/cpu/intel.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/cpu/intel.c
+@@ -30,6 +30,19 @@
+ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+ {
++      /* Unmask CPUID levels if masked: */
++      if (c->x86 == 6 && c->x86_model >= 15) {
++              u64 misc_enable;
++
++              rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
++
++              if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
++                      misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
++                      wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
++                      c->cpuid_level = cpuid_eax(0);
++              }
++      }
++
+       if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+               (c->x86 == 0x6 && c->x86_model >= 0x0e))
+               set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+@@ -242,6 +255,13 @@
+       intel_workarounds(c);
++      /*
++       * Detect the extended topology information if available. This
++       * will reinitialise the initial_apicid which will be used
++       * in init_intel_cacheinfo()
++       */
++      detect_extended_topology(c);
++
+       l2 = init_intel_cacheinfo(c);
+       if (c->cpuid_level > 9) {
+               unsigned eax = cpuid_eax(10);
+@@ -313,7 +333,6 @@
+ #endif
+-      detect_extended_topology(c);
+       if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+               /*
+                * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/cpu/mtrr/generic.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -45,6 +45,32 @@
+ static int mtrr_show;
+ module_param_named(show, mtrr_show, bool, 0);
++/**
++ * BIOS is expected to clear MtrrFixDramModEn bit, see for example
++ * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
++ * Opteron Processors" (26094 Rev. 3.30 February 2006), section
++ * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
++ * to 1 during BIOS initalization of the fixed MTRRs, then cleared to
++ * 0 for operation."
++ */
++static inline void k8_check_syscfg_dram_mod_en(void)
++{
++      u32 lo, hi;
++
++      if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
++            (boot_cpu_data.x86 >= 0x0f)))
++              return;
++
++      rdmsr(MSR_K8_SYSCFG, lo, hi);
++      if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
++              printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
++                     " not cleared by BIOS, clearing this bit\n",
++                     smp_processor_id());
++              lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
++              mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
++      }
++}
++
+ /*
+  * Returns the effective MTRR type for the region
+  * Error returns:
+@@ -178,6 +204,8 @@
+       unsigned int *p = (unsigned int *) frs;
+       int i;
++      k8_check_syscfg_dram_mod_en();
++
+       rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
+       for (i = 0; i < 2; i++)
+@@ -312,27 +340,10 @@
+ }
+ /**
+- * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
+- * see AMD publication no. 24593, chapter 3.2.1 for more information
+- */
+-static inline void k8_enable_fixed_iorrs(void)
+-{
+-      unsigned lo, hi;
+-
+-      rdmsr(MSR_K8_SYSCFG, lo, hi);
+-      mtrr_wrmsr(MSR_K8_SYSCFG, lo
+-                              | K8_MTRRFIXRANGE_DRAM_ENABLE
+-                              | K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
+-}
+-
+-/**
+  * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
+  * @msr: MSR address of the MTTR which should be checked and updated
+  * @changed: pointer which indicates whether the MTRR needed to be changed
+  * @msrwords: pointer to the MSR values which the MSR should have
+- *
+- * If K8 extentions are wanted, update the K8 SYSCFG MSR also.
+- * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
+  */
+ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
+ {
+@@ -341,10 +352,6 @@
+       rdmsr(msr, lo, hi);
+       if (lo != msrwords[0] || hi != msrwords[1]) {
+-              if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+-                  (boot_cpu_data.x86 >= 0x0f && boot_cpu_data.x86 <= 0x11) &&
+-                  ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
+-                      k8_enable_fixed_iorrs();
+               mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
+               *changed = true;
+       }
+@@ -423,6 +430,8 @@
+       bool changed = false;
+       int block=-1, range;
++      k8_check_syscfg_dram_mod_en();
++
+       while (fixed_range_blocks[++block].ranges)
+           for (range=0; range < fixed_range_blocks[block].ranges; range++)
+               set_fixed_range(fixed_range_blocks[block].base_msr + range,
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/cpu/mtrr/main.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/cpu/mtrr/main.c
+@@ -1600,8 +1600,7 @@
+       /* kvm/qemu doesn't have mtrr set right, don't trim them all */
+       if (!highest_pfn) {
+-              WARN(!kvm_para_available(), KERN_WARNING
+-                              "WARNING: strange, CPU MTRRs all blank?\n");
++              printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
+               return 0;
+       }
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/head64.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/head64.c
+@@ -26,7 +26,7 @@
+ #include <asm/bios_ebda.h>
+ /* boot cpu pda */
+-static struct x8664_pda _boot_cpu_pda __read_mostly;
++static struct x8664_pda _boot_cpu_pda;
+ #ifdef CONFIG_SMP
+ /*
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/head_64.S
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/head_64.S
+@@ -305,7 +305,7 @@
+       call dump_stack
+ #ifdef CONFIG_KALLSYMS        
+       leaq early_idt_ripmsg(%rip),%rdi
+-      movq 8(%rsp),%rsi       # get rip again
++      movq 0(%rsp),%rsi       # get rip again
+       call __print_symbol
+ #endif
+ #endif /* EARLY_PRINTK */
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/hpet.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/hpet.c
+@@ -267,6 +267,8 @@
+               now = hpet_readl(HPET_COUNTER);
+               cmp = now + (unsigned long) delta;
+               cfg = hpet_readl(HPET_Tn_CFG(timer));
++              /* Make sure we use edge triggered interrupts */
++              cfg &= ~HPET_TN_LEVEL;
+               cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
+                      HPET_TN_SETVAL | HPET_TN_32BIT;
+               hpet_writel(cfg, HPET_Tn_CFG(timer));
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/microcode_amd.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/microcode_amd.c
+@@ -62,7 +62,7 @@
+       unsigned int  mc_patch_data_checksum;
+       unsigned int  nb_dev_id;
+       unsigned int  sb_dev_id;
+-      unsigned char processor_rev_id[2];
++      u16 processor_rev_id;
+       unsigned char nb_rev_id;
+       unsigned char sb_rev_id;
+       unsigned char bios_api_rev;
+@@ -125,7 +125,7 @@
+       while (equiv_cpu_table[i].installed_cpu != 0) {
+               if (current_cpu_id == equiv_cpu_table[i].installed_cpu) {
+-                      equiv_cpu_id = equiv_cpu_table[i].equiv_cpu;
++                      equiv_cpu_id = equiv_cpu_table[i].equiv_cpu & 0xffff;
+                       break;
+               }
+               i++;
+@@ -137,21 +137,10 @@
+               return 0;
+       }
+-      if ((mc_header->processor_rev_id[0]) != (equiv_cpu_id & 0xff)) {
+-              printk(KERN_ERR
+-                      "microcode: CPU%d patch does not match "
+-                      "(patch is %x, cpu extended is %x) \n",
+-                      cpu, mc_header->processor_rev_id[0],
+-                      (equiv_cpu_id & 0xff));
+-              return 0;
+-      }
+-
+-      if ((mc_header->processor_rev_id[1]) != ((equiv_cpu_id >> 16) & 0xff)) {
+-              printk(KERN_ERR "microcode: CPU%d patch does not match "
+-                      "(patch is %x, cpu base id is %x) \n",
+-                      cpu, mc_header->processor_rev_id[1],
+-                      ((equiv_cpu_id >> 16) & 0xff));
+-
++      if (mc_header->processor_rev_id != equiv_cpu_id) {
++              printk(KERN_ERR "microcode: CPU%d patch does not match "
++                     "(processor_rev_id: %x, eqiv_cpu_id: %x)\n",
++                     cpu, mc_header->processor_rev_id, equiv_cpu_id);
+               return 0;
+       }
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/paravirt.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/paravirt.c
+@@ -268,6 +268,30 @@
+       return __get_cpu_var(paravirt_lazy_mode);
+ }
++void arch_flush_lazy_mmu_mode(void)
++{
++      preempt_disable();
++
++      if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
++              arch_leave_lazy_mmu_mode();
++              arch_enter_lazy_mmu_mode();
++      }
++
++      preempt_enable();
++}
++
++void arch_flush_lazy_cpu_mode(void)
++{
++      preempt_disable();
++
++      if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
++              arch_leave_lazy_cpu_mode();
++              arch_enter_lazy_cpu_mode();
++      }
++
++      preempt_enable();
++}
++
+ struct pv_info pv_info = {
+       .name = "bare hardware",
+       .paravirt_enabled = 0,
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/ptrace.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/ptrace.c
+@@ -1512,7 +1512,7 @@
+ #ifdef CONFIG_X86_32
+ # define IS_IA32      1
+ #elif defined CONFIG_IA32_EMULATION
+-# define IS_IA32      test_thread_flag(TIF_IA32)
++# define IS_IA32      is_compat_task()
+ #else
+ # define IS_IA32      0
+ #endif
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/reboot.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/reboot.c
+@@ -202,6 +202,14 @@
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
+               },
+       },
++      {       /* Handle problems with rebooting on Dell XPS710 */
++              .callback = set_bios_reboot,
++              .ident = "Dell XPS710",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
++              },
++      },
+       { }
+ };
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/syscall_table_32.S
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/syscall_table_32.S
+@@ -88,7 +88,7 @@
+       .long sys_uselib
+       .long sys_swapon
+       .long sys_reboot
+-      .long old_readdir
++      .long sys_old_readdir
+       .long old_mmap          /* 90 */
+       .long sys_munmap
+       .long sys_truncate
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/tlb_uv.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/tlb_uv.c
+@@ -586,7 +586,6 @@
+ static struct bau_control * __init uv_table_bases_init(int blade, int node)
+ {
+       int i;
+-      int *ip;
+       struct bau_msg_status *msp;
+       struct bau_control *bau_tabp;
+@@ -603,13 +602,6 @@
+               bau_cpubits_clear(&msp->seen_by, (int)
+                                 uv_blade_nr_possible_cpus(blade));
+-      bau_tabp->watching =
+-          kmalloc_node(sizeof(int) * DEST_NUM_RESOURCES, GFP_KERNEL, node);
+-      BUG_ON(!bau_tabp->watching);
+-
+-      for (i = 0, ip = bau_tabp->watching; i < DEST_Q_SIZE; i++, ip++)
+-              *ip = 0;
+-
+       uv_bau_table_bases[blade] = bau_tabp;
+       return bau_tabp;
+@@ -632,7 +624,6 @@
+               bcp->bau_msg_head       = bau_tablesp->va_queue_first;
+               bcp->va_queue_first     = bau_tablesp->va_queue_first;
+               bcp->va_queue_last      = bau_tablesp->va_queue_last;
+-              bcp->watching           = bau_tablesp->watching;
+               bcp->msg_statuses       = bau_tablesp->msg_statuses;
+               bcp->descriptor_base    = adp;
+       }
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/traps.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/traps.c
+@@ -104,6 +104,12 @@
+               local_irq_enable();
+ }
++static inline void conditional_cli(struct pt_regs *regs)
++{
++      if (regs->flags & X86_EFLAGS_IF)
++              local_irq_disable();
++}
++
+ static inline void preempt_conditional_cli(struct pt_regs *regs)
+ {
+       if (regs->flags & X86_EFLAGS_IF)
+@@ -629,8 +635,10 @@
+ #ifdef CONFIG_X86_32
+ debug_vm86:
++      /* reenable preemption: handle_vm86_trap() might sleep */
++      dec_preempt_count();
+       handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
+-      preempt_conditional_cli(regs);
++      conditional_cli(regs);
+       return;
+ #endif
+@@ -904,7 +912,7 @@
+ EXPORT_SYMBOL_GPL(math_state_restore);
+ #ifndef CONFIG_MATH_EMULATION
+-asmlinkage void math_emulate(long arg)
++void math_emulate(struct math_emu_info *info)
+ {
+       printk(KERN_EMERG
+               "math-emulation not enabled and no coprocessor found.\n");
+@@ -914,16 +922,19 @@
+ }
+ #endif /* CONFIG_MATH_EMULATION */
+-dotraplinkage void __kprobes
+-do_device_not_available(struct pt_regs *regs, long error)
++dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs)
+ {
+ #ifdef CONFIG_X86_32
+       if (read_cr0() & X86_CR0_EM) {
+-              conditional_sti(regs);
+-              math_emulate(0);
++              struct math_emu_info info = { };
++
++              conditional_sti(&regs);
++
++              info.regs = &regs;
++              math_emulate(&info);
+       } else {
+               math_state_restore(); /* interrupts still off */
+-              conditional_sti(regs);
++              conditional_sti(&regs);
+       }
+ #else
+       math_state_restore();
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/vmi_32.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/vmi_32.c
+@@ -430,6 +430,16 @@
+ }
+ /*
++ * We use the pgd_free hook for releasing the pgd page:
++ */
++static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd)
++{
++      unsigned long pfn = __pa(pgd) >> PAGE_SHIFT;
++
++      vmi_ops.release_page(pfn, VMI_PAGE_L2);
++}
++
++/*
+  * Helper macros for MMU update flags.  We can defer updates until a flush
+  * or page invalidation only if the update is to the current address space
+  * (otherwise, there is no flush).  We must check against init_mm, since
+@@ -881,6 +891,7 @@
+       if (vmi_ops.release_page) {
+               pv_mmu_ops.release_pte = vmi_release_pte;
+               pv_mmu_ops.release_pmd = vmi_release_pmd;
++              pv_mmu_ops.pgd_free = vmi_pgd_free;
+       }
+       /* Set linear is needed in all cases */
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kernel/vmiclock_32.c
++++ kernel-maemo-2.6.28.test/arch/x86/kernel/vmiclock_32.c
+@@ -283,10 +283,13 @@
+ #endif
+ /** vmi clocksource */
++static struct clocksource clocksource_vmi;
+ static cycle_t read_real_cycles(void)
+ {
+-      return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
++      cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
++      return ret >= clocksource_vmi.cycle_last ?
++              ret : clocksource_vmi.cycle_last;
+ }
+ static struct clocksource clocksource_vmi = {
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kvm/i8254.c
++++ kernel-maemo-2.6.28.test/arch/x86/kvm/i8254.c
+@@ -207,7 +207,7 @@
+       hrtimer_add_expires_ns(&pt->timer, pt->period);
+       pt->scheduled = hrtimer_get_expires_ns(&pt->timer);
+       if (pt->period)
+-              ps->channels[0].count_load_time = hrtimer_get_expires(&pt->timer);
++              ps->channels[0].count_load_time = ktime_get();
+       return (pt->period == 0 ? 0 : 1);
+ }
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kvm/irq.c
++++ kernel-maemo-2.6.28.test/arch/x86/kvm/irq.c
+@@ -87,13 +87,6 @@
+ }
+ EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
+-void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
+-{
+-      kvm_apic_timer_intr_post(vcpu, vec);
+-      /* TODO: PIT, RTC etc. */
+-}
+-EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
+-
+ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
+ {
+       __kvm_migrate_apic_timer(vcpu);
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kvm/irq.h
++++ kernel-maemo-2.6.28.test/arch/x86/kvm/irq.h
+@@ -84,7 +84,6 @@
+ void kvm_pic_reset(struct kvm_kpic_state *s);
+-void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
+ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
+ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
+ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kvm/lapic.c
++++ kernel-maemo-2.6.28.test/arch/x86/kvm/lapic.c
+@@ -35,6 +35,12 @@
+ #include "kvm_cache_regs.h"
+ #include "irq.h"
++#ifndef CONFIG_X86_64
++#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
++#else
++#define mod_64(x, y) ((x) % (y))
++#endif
++
+ #define PRId64 "d"
+ #define PRIx64 "llx"
+ #define PRIu64 "u"
+@@ -497,52 +503,22 @@
+ static u32 apic_get_tmcct(struct kvm_lapic *apic)
+ {
+-      u64 counter_passed;
+-      ktime_t passed, now;
++      ktime_t remaining;
++      s64 ns;
+       u32 tmcct;
+       ASSERT(apic != NULL);
+-      now = apic->timer.dev.base->get_time();
+-      tmcct = apic_get_reg(apic, APIC_TMICT);
+-
+       /* if initial count is 0, current count should also be 0 */
+-      if (tmcct == 0)
++      if (apic_get_reg(apic, APIC_TMICT) == 0)
+               return 0;
+-      if (unlikely(ktime_to_ns(now) <=
+-              ktime_to_ns(apic->timer.last_update))) {
+-              /* Wrap around */
+-              passed = ktime_add(( {
+-                                  (ktime_t) {
+-                                  .tv64 = KTIME_MAX -
+-                                  (apic->timer.last_update).tv64}; }
+-                                 ), now);
+-              apic_debug("time elapsed\n");
+-      } else
+-              passed = ktime_sub(now, apic->timer.last_update);
+-
+-      counter_passed = div64_u64(ktime_to_ns(passed),
+-                                 (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
+-
+-      if (counter_passed > tmcct) {
+-              if (unlikely(!apic_lvtt_period(apic))) {
+-                      /* one-shot timers stick at 0 until reset */
+-                      tmcct = 0;
+-              } else {
+-                      /*
+-                       * periodic timers reset to APIC_TMICT when they
+-                       * hit 0. The while loop simulates this happening N
+-                       * times. (counter_passed %= tmcct) would also work,
+-                       * but might be slower or not work on 32-bit??
+-                       */
+-                      while (counter_passed > tmcct)
+-                              counter_passed -= tmcct;
+-                      tmcct -= counter_passed;
+-              }
+-      } else {
+-              tmcct -= counter_passed;
+-      }
++      remaining = hrtimer_expires_remaining(&apic->timer.dev);
++      if (ktime_to_ns(remaining) < 0)
++              remaining = ktime_set(0, 0);
++
++      ns = mod_64(ktime_to_ns(remaining), apic->timer.period);
++      tmcct = div64_u64(ns, (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
+       return tmcct;
+ }
+@@ -639,8 +615,6 @@
+ {
+       ktime_t now = apic->timer.dev.base->get_time();
+-      apic->timer.last_update = now;
+-
+       apic->timer.period = apic_get_reg(apic, APIC_TMICT) *
+                   APIC_BUS_CYCLE_NS * apic->timer.divide_count;
+       atomic_set(&apic->timer.pending, 0);
+@@ -1068,16 +1042,6 @@
+       }
+ }
+-void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
+-{
+-      struct kvm_lapic *apic = vcpu->arch.apic;
+-
+-      if (apic && apic_lvt_vector(apic, APIC_LVTT) == vec)
+-              apic->timer.last_update = ktime_add_ns(
+-                              apic->timer.last_update,
+-                              apic->timer.period);
+-}
+-
+ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
+ {
+       int vector = kvm_apic_has_interrupt(vcpu);
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kvm/lapic.h
++++ kernel-maemo-2.6.28.test/arch/x86/kvm/lapic.h
+@@ -12,7 +12,6 @@
+               atomic_t pending;
+               s64 period;     /* unit: ns */
+               u32 divide_count;
+-              ktime_t last_update;
+               struct hrtimer dev;
+       } timer;
+       struct kvm_vcpu *vcpu;
+@@ -42,7 +41,6 @@
+ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu);
+ int kvm_lapic_enabled(struct kvm_vcpu *vcpu);
+ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
+-void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
+ void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
+ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kvm/mmu.c
++++ kernel-maemo-2.6.28.test/arch/x86/kvm/mmu.c
+@@ -384,7 +384,9 @@
+ {
+       int *write_count;
+-      write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
++      gfn = unalias_gfn(kvm, gfn);
++      write_count = slot_largepage_idx(gfn,
++                                       gfn_to_memslot_unaliased(kvm, gfn));
+       *write_count += 1;
+ }
+@@ -392,16 +394,20 @@
+ {
+       int *write_count;
+-      write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
++      gfn = unalias_gfn(kvm, gfn);
++      write_count = slot_largepage_idx(gfn,
++                                       gfn_to_memslot_unaliased(kvm, gfn));
+       *write_count -= 1;
+       WARN_ON(*write_count < 0);
+ }
+ static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
+ {
+-      struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
++      struct kvm_memory_slot *slot;
+       int *largepage_idx;
++      gfn = unalias_gfn(kvm, gfn);
++      slot = gfn_to_memslot_unaliased(kvm, gfn);
+       if (slot) {
+               largepage_idx = slot_largepage_idx(gfn, slot);
+               return *largepage_idx;
+@@ -787,7 +793,7 @@
+       set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+       list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
+       ASSERT(is_empty_shadow_page(sp->spt));
+-      sp->slot_bitmap = 0;
++      bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
+       sp->multimapped = 0;
+       sp->parent_pte = parent_pte;
+       --vcpu->kvm->arch.n_free_mmu_pages;
+@@ -975,7 +981,7 @@
+       for_each_unsync_children(sp->unsync_child_bitmap, i) {
+               u64 ent = sp->spt[i];
+-              if (is_shadow_present_pte(ent)) {
++              if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
+                       struct kvm_mmu_page *child;
+                       child = page_header(ent & PT64_BASE_ADDR_MASK);
+@@ -1153,6 +1159,8 @@
+       if (level == PT32E_ROOT_LEVEL) {
+               shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
+               shadow_addr &= PT64_BASE_ADDR_MASK;
++              if (!shadow_addr)
++                      return 1;
+               --level;
+       }
+@@ -1362,7 +1370,7 @@
+       int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
+       struct kvm_mmu_page *sp = page_header(__pa(pte));
+-      __set_bit(slot, &sp->slot_bitmap);
++      __set_bit(slot, sp->slot_bitmap);
+ }
+ static void mmu_convert_notrap(struct kvm_mmu_page *sp)
+@@ -2451,7 +2459,7 @@
+               int i;
+               u64 *pt;
+-              if (!test_bit(slot, &sp->slot_bitmap))
++              if (!test_bit(slot, sp->slot_bitmap))
+                       continue;
+               pt = sp->spt;
+@@ -2860,8 +2868,8 @@
+               if (sp->role.metaphysical)
+                       continue;
+-              slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
+               gfn = unalias_gfn(vcpu->kvm, sp->gfn);
++              slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
+               rmapp = &slot->rmap[gfn - slot->base_gfn];
+               if (*rmapp)
+                       printk(KERN_ERR "%s: (%s) shadow page has writable"
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kvm/paging_tmpl.h
++++ kernel-maemo-2.6.28.test/arch/x86/kvm/paging_tmpl.h
+@@ -467,9 +467,13 @@
+                                     u64 *sptep, int level)
+ {
+-      if (level == PT_PAGE_TABLE_LEVEL) {
+-              if (is_shadow_present_pte(*sptep))
++      if (level == PT_PAGE_TABLE_LEVEL ||
++          ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
++              if (is_shadow_present_pte(*sptep)) {
+                       rmap_remove(vcpu->kvm, sptep);
++                      if (is_large_pte(*sptep))
++                              --vcpu->kvm->stat.lpages;
++              }
+               set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+               return 1;
+       }
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kvm/svm.c
++++ kernel-maemo-2.6.28.test/arch/x86/kvm/svm.c
+@@ -772,6 +772,22 @@
+       var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
+       var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
+       var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
++
++      /*
++       * SVM always stores 0 for the 'G' bit in the CS selector in
++       * the VMCB on a VMEXIT. This hurts cross-vendor migration:
++       * Intel's VMENTRY has a check on the 'G' bit.
++       */
++      if (seg == VCPU_SREG_CS)
++              var->g = s->limit > 0xfffff;
++
++      /*
++       * Work around a bug where the busy flag in the tr selector
++       * isn't exposed
++       */
++      if (seg == VCPU_SREG_TR)
++              var->type |= 0x2;
++
+       var->unusable = !var->present;
+ }
+@@ -1596,7 +1612,6 @@
+       /* Okay, we can deliver the interrupt: grab it and update PIC state. */
+       intr_vector = kvm_cpu_get_interrupt(vcpu);
+       svm_inject_irq(svm, intr_vector);
+-      kvm_timer_intr_post(vcpu, intr_vector);
+ out:
+       update_cr8_intercept(vcpu);
+ }
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kvm/vmx.c
++++ kernel-maemo-2.6.28.test/arch/x86/kvm/vmx.c
+@@ -897,6 +897,7 @@
+               data = vmcs_readl(GUEST_SYSENTER_ESP);
+               break;
+       default:
++              vmx_load_host_state(to_vmx(vcpu));
+               msr = find_msr_entry(to_vmx(vcpu), msr_index);
+               if (msr) {
+                       data = msr->data;
+@@ -2407,7 +2408,7 @@
+ {
+       int ret;
+       struct kvm_userspace_memory_region tss_mem = {
+-              .slot = 8,
++              .slot = TSS_PRIVATE_MEMSLOT,
+               .guest_phys_addr = addr,
+               .memory_size = PAGE_SIZE * 3,
+               .flags = 0,
+@@ -3171,10 +3172,8 @@
+               else
+                       enable_irq_window(vcpu);
+       }
+-      if (vcpu->arch.interrupt.pending) {
++      if (vcpu->arch.interrupt.pending)
+               vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
+-              kvm_timer_intr_post(vcpu, vcpu->arch.interrupt.nr);
+-      }
+ }
+ /*
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kvm/vmx.h
++++ kernel-maemo-2.6.28.test/arch/x86/kvm/vmx.h
+@@ -331,8 +331,9 @@
+ #define AR_RESERVD_MASK 0xfffe0f00
+-#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT      9
+-#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT    10
++#define TSS_PRIVATE_MEMSLOT                   (KVM_MEMORY_SLOTS + 0)
++#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT      (KVM_MEMORY_SLOTS + 1)
++#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT    (KVM_MEMORY_SLOTS + 2)
+ #define VMX_NR_VPIDS                          (1 << 16)
+ #define VMX_VPID_EXTENT_SINGLE_CONTEXT                1
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kvm/x86.c
++++ kernel-maemo-2.6.28.test/arch/x86/kvm/x86.c
+@@ -906,7 +906,6 @@
+       case KVM_CAP_USER_MEMORY:
+       case KVM_CAP_SET_TSS_ADDR:
+       case KVM_CAP_EXT_CPUID:
+-      case KVM_CAP_CLOCKSOURCE:
+       case KVM_CAP_PIT:
+       case KVM_CAP_NOP_IO_DELAY:
+       case KVM_CAP_MP_STATE:
+@@ -931,6 +930,9 @@
+       case KVM_CAP_IOMMU:
+               r = intel_iommu_found();
+               break;
++      case KVM_CAP_CLOCKSOURCE:
++              r = boot_cpu_has(X86_FEATURE_CONSTANT_TSC);
++              break;
+       default:
+               r = 0;
+               break;
+@@ -1188,6 +1190,7 @@
+               int t, times = entry->eax & 0xff;
+               entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
++              entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+               for (t = 1; t < times && *nent < maxnent; ++t) {
+                       do_cpuid_1_ent(&entry[t], function, 0);
+                       entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
+@@ -1218,7 +1221,7 @@
+               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+               /* read more entries until level_type is zero */
+               for (i = 1; *nent < maxnent; ++i) {
+-                      level_type = entry[i - 1].ecx & 0xff;
++                      level_type = entry[i - 1].ecx & 0xff00;
+                       if (!level_type)
+                               break;
+                       do_cpuid_1_ent(&entry[i], function, i);
+@@ -2729,7 +2732,7 @@
+       e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
+       /* when no next entry is found, the current entry[i] is reselected */
+-      for (j = i + 1; j == i; j = (j + 1) % nent) {
++      for (j = i + 1; ; j = (j + 1) % nent) {
+               struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
+               if (ej->function == e->function) {
+                       ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+@@ -2973,7 +2976,7 @@
+               pr_debug("vcpu %d received sipi with vector # %x\n",
+                        vcpu->vcpu_id, vcpu->arch.sipi_vector);
+               kvm_lapic_reset(vcpu);
+-              r = kvm_x86_ops->vcpu_reset(vcpu);
++              r = kvm_arch_vcpu_reset(vcpu);
+               if (r)
+                       return r;
+               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+@@ -3925,6 +3928,9 @@
+ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
+ {
++      vcpu->arch.nmi_pending = false;
++      vcpu->arch.nmi_injected = false;
++
+       return kvm_x86_ops->vcpu_reset(vcpu);
+ }
+--- kernel-maemo-2.6.28.test.orig/arch/x86/kvm/x86_emulate.c
++++ kernel-maemo-2.6.28.test/arch/x86/kvm/x86_emulate.c
+@@ -299,7 +299,7 @@
+ static u16 group2_table[] = {
+       [Group7*8] =
+-      SrcNone | ModRM, 0, 0, 0,
++      SrcNone | ModRM, 0, 0, SrcNone | ModRM,
+       SrcNone | ModRM | DstMem | Mov, 0,
+       SrcMem16 | ModRM | Mov, 0,
+ };
+--- kernel-maemo-2.6.28.test.orig/arch/x86/lib/usercopy_32.c
++++ kernel-maemo-2.6.28.test/arch/x86/lib/usercopy_32.c
+@@ -56,7 +56,7 @@
+               "       jmp 2b\n"                                          \
+               ".previous\n"                                              \
+               _ASM_EXTABLE(0b,3b)                                        \
+-              : "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1),      \
++              : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),    \
+                 "=&D" (__d2)                                             \
+               : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
+               : "memory");                                               \
+@@ -218,7 +218,7 @@
+               "       .align 4\n"
+               "       .long 0b,2b\n"
+               ".previous"
+-              :"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
++              :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
+               :"0" (n), "1" (s), "2" (0), "3" (mask)
+               :"cc");
+       return res & mask;
+--- kernel-maemo-2.6.28.test.orig/arch/x86/lib/usercopy_64.c
++++ kernel-maemo-2.6.28.test/arch/x86/lib/usercopy_64.c
+@@ -32,7 +32,7 @@
+               "       jmp 2b\n"                                          \
+               ".previous\n"                                              \
+               _ASM_EXTABLE(0b,3b)                                        \
+-              : "=r"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1),      \
++              : "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),    \
+                 "=&D" (__d2)                                             \
+               : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
+               : "memory");                                               \
+@@ -86,7 +86,7 @@
+               ".previous\n"
+               _ASM_EXTABLE(0b,3b)
+               _ASM_EXTABLE(1b,2b)
+-              : [size8] "=c"(size), [dst] "=&D" (__d0)
++              : [size8] "=&c"(size), [dst] "=&D" (__d0)
+               : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
+                 [zero] "r" (0UL), [eight] "r" (8UL));
+       return size;
+--- kernel-maemo-2.6.28.test.orig/arch/x86/math-emu/fpu_entry.c
++++ kernel-maemo-2.6.28.test/arch/x86/math-emu/fpu_entry.c
+@@ -131,7 +131,7 @@
+ static int valid_prefix(u_char *Byte, u_char __user ** fpu_eip,
+                       overrides * override);
+-asmlinkage void math_emulate(long arg)
++void math_emulate(struct math_emu_info *info)
+ {
+       u_char FPU_modrm, byte1;
+       unsigned short code;
+@@ -161,7 +161,7 @@
+       RE_ENTRANT_CHECK_ON;
+ #endif /* RE_ENTRANT_CHECKING */
+-      SETUP_DATA_AREA(arg);
++      FPU_info = info;
+       FPU_ORIG_EIP = FPU_EIP;
+@@ -659,7 +659,7 @@
+       }
+ }
+-void math_abort(struct info *info, unsigned int signal)
++void math_abort(struct math_emu_info *info, unsigned int signal)
+ {
+       FPU_EIP = FPU_ORIG_EIP;
+       current->thread.trap_no = 16;
+--- kernel-maemo-2.6.28.test.orig/arch/x86/math-emu/fpu_proto.h
++++ kernel-maemo-2.6.28.test/arch/x86/math-emu/fpu_proto.h
+@@ -51,8 +51,8 @@
+ extern void fst_i_(void);
+ extern void fstp_i(void);
+ /* fpu_entry.c */
+-asmlinkage extern void math_emulate(long arg);
+-extern void math_abort(struct info *info, unsigned int signal);
++extern void math_emulate(struct math_emu_info *info);
++extern void math_abort(struct math_emu_info *info, unsigned int signal);
+ /* fpu_etc.c */
+ extern void FPU_etc(void);
+ /* fpu_tags.c */
+--- kernel-maemo-2.6.28.test.orig/arch/x86/math-emu/fpu_system.h
++++ kernel-maemo-2.6.28.test/arch/x86/math-emu/fpu_system.h
+@@ -16,10 +16,6 @@
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+-/* This sets the pointer FPU_info to point to the argument part
+-   of the stack frame of math_emulate() */
+-#define SETUP_DATA_AREA(arg)  FPU_info = (struct info *) &arg
+-
+ /* s is always from a cpu register, and the cpu does bounds checking
+  * during register load --> no further bounds checks needed */
+ #define LDT_DESCRIPTOR(s)     (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+@@ -38,12 +34,12 @@
+ #define I387                  (current->thread.xstate)
+ #define FPU_info              (I387->soft.info)
+-#define FPU_CS                        (*(unsigned short *) &(FPU_info->___cs))
+-#define FPU_SS                        (*(unsigned short *) &(FPU_info->___ss))
+-#define FPU_DS                        (*(unsigned short *) &(FPU_info->___ds))
+-#define FPU_EAX                       (FPU_info->___eax)
+-#define FPU_EFLAGS            (FPU_info->___eflags)
+-#define FPU_EIP                       (FPU_info->___eip)
++#define FPU_CS                        (*(unsigned short *) &(FPU_info->regs->cs))
++#define FPU_SS                        (*(unsigned short *) &(FPU_info->regs->ss))
++#define FPU_DS                        (*(unsigned short *) &(FPU_info->regs->ds))
++#define FPU_EAX                       (FPU_info->regs->ax)
++#define FPU_EFLAGS            (FPU_info->regs->flags)
++#define FPU_EIP                       (FPU_info->regs->ip)
+ #define FPU_ORIG_EIP          (FPU_info->___orig_eip)
+ #define FPU_lookahead           (I387->soft.lookahead)
+--- kernel-maemo-2.6.28.test.orig/arch/x86/math-emu/get_address.c
++++ kernel-maemo-2.6.28.test/arch/x86/math-emu/get_address.c
+@@ -29,46 +29,43 @@
+ #define FPU_WRITE_BIT 0x10
+ static int reg_offset[] = {
+-      offsetof(struct info, ___eax),
+-      offsetof(struct info, ___ecx),
+-      offsetof(struct info, ___edx),
+-      offsetof(struct info, ___ebx),
+-      offsetof(struct info, ___esp),
+-      offsetof(struct info, ___ebp),
+-      offsetof(struct info, ___esi),
+-      offsetof(struct info, ___edi)
++      offsetof(struct pt_regs, ax),
++      offsetof(struct pt_regs, cx),
++      offsetof(struct pt_regs, dx),
++      offsetof(struct pt_regs, bx),
++      offsetof(struct pt_regs, sp),
++      offsetof(struct pt_regs, bp),
++      offsetof(struct pt_regs, si),
++      offsetof(struct pt_regs, di)
+ };
+-#define REG_(x) (*(long *)(reg_offset[(x)]+(u_char *) FPU_info))
++#define REG_(x) (*(long *)(reg_offset[(x)] + (u_char *)FPU_info->regs))
+ static int reg_offset_vm86[] = {
+-      offsetof(struct info, ___cs),
+-      offsetof(struct info, ___vm86_ds),
+-      offsetof(struct info, ___vm86_es),
+-      offsetof(struct info, ___vm86_fs),
+-      offsetof(struct info, ___vm86_gs),
+-      offsetof(struct info, ___ss),
+-      offsetof(struct info, ___vm86_ds)
++      offsetof(struct pt_regs, cs),
++      offsetof(struct kernel_vm86_regs, ds),
++      offsetof(struct kernel_vm86_regs, es),
++      offsetof(struct kernel_vm86_regs, fs),
++      offsetof(struct kernel_vm86_regs, gs),
++      offsetof(struct pt_regs, ss),
++      offsetof(struct kernel_vm86_regs, ds)
+ };
+ #define VM86_REG_(x) (*(unsigned short *) \
+-                    (reg_offset_vm86[((unsigned)x)]+(u_char *) FPU_info))
+-
+-/* This dummy, gs is not saved on the stack. */
+-#define ___GS ___ds
++              (reg_offset_vm86[((unsigned)x)] + (u_char *)FPU_info->regs))
+ static int reg_offset_pm[] = {
+-      offsetof(struct info, ___cs),
+-      offsetof(struct info, ___ds),
+-      offsetof(struct info, ___es),
+-      offsetof(struct info, ___fs),
+-      offsetof(struct info, ___GS),
+-      offsetof(struct info, ___ss),
+-      offsetof(struct info, ___ds)
++      offsetof(struct pt_regs, cs),
++      offsetof(struct pt_regs, ds),
++      offsetof(struct pt_regs, es),
++      offsetof(struct pt_regs, fs),
++      offsetof(struct pt_regs, ds),   /* dummy, not saved on stack */
++      offsetof(struct pt_regs, ss),
++      offsetof(struct pt_regs, ds)
+ };
+ #define PM_REG_(x) (*(unsigned short *) \
+-                    (reg_offset_pm[((unsigned)x)]+(u_char *) FPU_info))
++              (reg_offset_pm[((unsigned)x)] + (u_char *)FPU_info->regs))
+ /* Decode the SIB byte. This function assumes mod != 0 */
+ static int sib(int mod, unsigned long *fpu_eip)
+@@ -349,34 +346,34 @@
+       }
+       switch (rm) {
+       case 0:
+-              address += FPU_info->___ebx + FPU_info->___esi;
++              address += FPU_info->regs->bx + FPU_info->regs->si;
+               break;
+       case 1:
+-              address += FPU_info->___ebx + FPU_info->___edi;
++              address += FPU_info->regs->bx + FPU_info->regs->di;
+               break;
+       case 2:
+-              address += FPU_info->___ebp + FPU_info->___esi;
++              address += FPU_info->regs->bp + FPU_info->regs->si;
+               if (addr_modes.override.segment == PREFIX_DEFAULT)
+                       addr_modes.override.segment = PREFIX_SS_;
+               break;
+       case 3:
+-              address += FPU_info->___ebp + FPU_info->___edi;
++              address += FPU_info->regs->bp + FPU_info->regs->di;
+               if (addr_modes.override.segment == PREFIX_DEFAULT)
+                       addr_modes.override.segment = PREFIX_SS_;
+               break;
+       case 4:
+-              address += FPU_info->___esi;
++              address += FPU_info->regs->si;
+               break;
+       case 5:
+-              address += FPU_info->___edi;
++              address += FPU_info->regs->di;
+               break;
+       case 6:
+-              address += FPU_info->___ebp;
++              address += FPU_info->regs->bp;
+               if (addr_modes.override.segment == PREFIX_DEFAULT)
+                       addr_modes.override.segment = PREFIX_SS_;
+               break;
+       case 7:
+-              address += FPU_info->___ebx;
++              address += FPU_info->regs->bx;
+               break;
+       }
+--- kernel-maemo-2.6.28.test.orig/arch/x86/mm/fault.c
++++ kernel-maemo-2.6.28.test/arch/x86/mm/fault.c
+@@ -533,7 +533,7 @@
+          happen within a race in page table update. In the later
+          case just flush. */
+-      pgd = pgd_offset(current->mm ?: &init_mm, address);
++      pgd = pgd_offset(current->active_mm, address);
+       pgd_ref = pgd_offset_k(address);
+       if (pgd_none(*pgd_ref))
+               return -1;
+@@ -601,8 +601,6 @@
+       si_code = SEGV_MAPERR;
+-      if (notify_page_fault(regs))
+-              return;
+       if (unlikely(kmmio_fault(regs, address)))
+               return;
+@@ -632,6 +630,9 @@
+               if (spurious_fault(address, error_code))
+                       return;
++              /* kprobes don't want to hook the spurious faults. */
++              if (notify_page_fault(regs))
++                      return;
+               /*
+                * Don't take the mm semaphore here. If we fixup a prefetch
+                * fault we could otherwise deadlock.
+@@ -639,6 +640,9 @@
+               goto bad_area_nosemaphore;
+       }
++      /* kprobes don't want to hook the spurious faults. */
++      if (notify_page_fault(regs))
++              return;
+       /*
+        * It's safe to allow irq's after cr2 has been saved and the
+--- kernel-maemo-2.6.28.test.orig/arch/x86/mm/numa_64.c
++++ kernel-maemo-2.6.28.test/arch/x86/mm/numa_64.c
+@@ -145,7 +145,7 @@
+       return shift;
+ }
+-int early_pfn_to_nid(unsigned long pfn)
++int __meminit  __early_pfn_to_nid(unsigned long pfn)
+ {
+       return phys_to_nid(pfn << PAGE_SHIFT);
+ }
+--- kernel-maemo-2.6.28.test.orig/arch/x86/mm/pageattr.c
++++ kernel-maemo-2.6.28.test/arch/x86/mm/pageattr.c
+@@ -534,6 +534,36 @@
+       return 0;
+ }
++static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
++                             int primary)
++{
++      /*
++       * Ignore all non primary paths.
++       */
++      if (!primary)
++              return 0;
++
++      /*
++       * Ignore the NULL PTE for kernel identity mapping, as it is expected
++       * to have holes.
++       * Also set numpages to '1' indicating that we processed cpa req for
++       * one virtual address page and its pfn. TBD: numpages can be set based
++       * on the initial value and the level returned by lookup_address().
++       */
++      if (within(vaddr, PAGE_OFFSET,
++                 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
++              cpa->numpages = 1;
++              cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
++              return 0;
++      } else {
++              WARN(1, KERN_WARNING "CPA: called for zero pte. "
++                      "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
++                      *cpa->vaddr);
++
++              return -EFAULT;
++      }
++}
++
+ static int __change_page_attr(struct cpa_data *cpa, int primary)
+ {
+       unsigned long address;
+@@ -546,20 +576,21 @@
+       else
+               address = *cpa->vaddr;
++      /*
++       * If we're called with lazy mmu updates enabled, the
++       * in-memory pte state may be stale.  Flush pending updates to
++       * bring them up to date.
++       */
++      arch_flush_lazy_mmu_mode();
++
+ repeat:
+       kpte = lookup_address(address, &level);
+       if (!kpte)
+-              return 0;
++              return __cpa_process_fault(cpa, address, primary);
+       old_pte = *kpte;
+-      if (!pte_val(old_pte)) {
+-              if (!primary)
+-                      return 0;
+-              WARN(1, KERN_WARNING "CPA: called for zero pte. "
+-                     "vaddr = %lx cpa->vaddr = %lx\n", address,
+-                     *cpa->vaddr);
+-              return -EINVAL;
+-      }
++      if (!pte_val(old_pte))
++              return __cpa_process_fault(cpa, address, primary);
+       if (level == PG_LEVEL_4K) {
+               pte_t new_pte;
+@@ -657,12 +688,7 @@
+               vaddr = *cpa->vaddr;
+       if (!(within(vaddr, PAGE_OFFSET,
+-                  PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT))
+-#ifdef CONFIG_X86_64
+-              || within(vaddr, PAGE_OFFSET + (1UL<<32),
+-                  PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))
+-#endif
+-      )) {
++                  PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
+               alias_cpa = *cpa;
+               temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
+@@ -835,6 +861,13 @@
+       } else
+               cpa_flush_all(cache);
++      /*
++       * If we've been called with lazy mmu updates enabled, then
++       * make sure that everything gets flushed out before we
++       * return.
++       */
++      arch_flush_lazy_mmu_mode();
++
+ out:
+       return ret;
+ }
+--- kernel-maemo-2.6.28.test.orig/arch/x86/mm/pat.c
++++ kernel-maemo-2.6.28.test/arch/x86/mm/pat.c
+@@ -333,11 +333,23 @@
+                                             req_type & _PAGE_CACHE_MASK);
+       }
+-      is_range_ram = pagerange_is_ram(start, end);
+-      if (is_range_ram == 1)
+-              return reserve_ram_pages_type(start, end, req_type, new_type);
+-      else if (is_range_ram < 0)
+-              return -EINVAL;
++      if (new_type)
++              *new_type = actual_type;
++
++      /*
++       * For legacy reasons, some parts of the physical address range in the
++       * legacy 1MB region are treated as non-RAM (even when listed as RAM in
++       * the e820 tables).  So we will track the memory attributes of this
++       * legacy 1MB region using the linear memtype_list always.
++       */
++      if (end >= ISA_END_ADDRESS) {
++              is_range_ram = pagerange_is_ram(start, end);
++              if (is_range_ram == 1)
++                      return reserve_ram_pages_type(start, end, req_type,
++                                                    new_type);
++              else if (is_range_ram < 0)
++                      return -EINVAL;
++      }
+       new  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
+       if (!new)
+@@ -347,9 +359,6 @@
+       new->end        = end;
+       new->type       = actual_type;
+-      if (new_type)
+-              *new_type = actual_type;
+-
+       spin_lock(&memtype_lock);
+       if (cached_entry && start >= cached_start)
+@@ -437,11 +446,19 @@
+       if (is_ISA_range(start, end - 1))
+               return 0;
+-      is_range_ram = pagerange_is_ram(start, end);
+-      if (is_range_ram == 1)
+-              return free_ram_pages_type(start, end);
+-      else if (is_range_ram < 0)
+-              return -EINVAL;
++      /*
++       * For legacy reasons, some parts of the physical address range in the
++       * legacy 1MB region are treated as non-RAM (even when listed as RAM in
++       * the e820 tables).  So we will track the memory attributes of this
++       * legacy 1MB region using the linear memtype_list always.
++       */
++      if (end >= ISA_END_ADDRESS) {
++              is_range_ram = pagerange_is_ram(start, end);
++              if (is_range_ram == 1)
++                      return free_ram_pages_type(start, end);
++              else if (is_range_ram < 0)
++                      return -EINVAL;
++      }
+       spin_lock(&memtype_lock);
+       list_for_each_entry(entry, &memtype_list, nd) {
+--- kernel-maemo-2.6.28.test.orig/arch/x86/oprofile/op_model_ppro.c
++++ kernel-maemo-2.6.28.test/arch/x86/oprofile/op_model_ppro.c
+@@ -78,8 +78,18 @@
+       if (cpu_has_arch_perfmon) {
+               union cpuid10_eax eax;
+               eax.full = cpuid_eax(0xa);
+-              if (counter_width < eax.split.bit_width)
+-                      counter_width = eax.split.bit_width;
++
++              /*
++               * For Core2 (family 6, model 15), don't reset the
++               * counter width:
++               */
++              if (!(eax.split.version_id == 0 &&
++                      current_cpu_data.x86 == 6 &&
++                              current_cpu_data.x86_model == 15)) {
++
++                      if (counter_width < eax.split.bit_width)
++                              counter_width = eax.split.bit_width;
++              }
+       }
+       /* clear all counters */
+--- kernel-maemo-2.6.28.test.orig/arch/x86/pci/i386.c
++++ kernel-maemo-2.6.28.test/arch/x86/pci/i386.c
+@@ -329,6 +329,9 @@
+                       return -EINVAL;
+               }
+               flags = new_flags;
++              vma->vm_page_prot = __pgprot(
++                      (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK) |
++                      flags);
+       }
+       if (((vma->vm_pgoff < max_low_pfn_mapped) ||
+--- kernel-maemo-2.6.28.test.orig/arch/x86/pci/irq.c
++++ kernel-maemo-2.6.28.test/arch/x86/pci/irq.c
+@@ -573,6 +573,7 @@
+       case PCI_DEVICE_ID_INTEL_ICH7_1:
+       case PCI_DEVICE_ID_INTEL_ICH7_30:
+       case PCI_DEVICE_ID_INTEL_ICH7_31:
++      case PCI_DEVICE_ID_INTEL_TGP_LPC:
+       case PCI_DEVICE_ID_INTEL_ESB2_0:
+       case PCI_DEVICE_ID_INTEL_ICH8_0:
+       case PCI_DEVICE_ID_INTEL_ICH8_1:
+--- kernel-maemo-2.6.28.test.orig/arch/x86/xen/enlighten.c
++++ kernel-maemo-2.6.28.test/arch/x86/xen/enlighten.c
+@@ -1669,6 +1669,9 @@
+          possible map and a non-dummy shared_info. */
+       per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
++      local_irq_disable();
++      early_boot_irqs_off();
++
+       xen_raw_console_write("mapping kernel into physical memory\n");
+       pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
+--- kernel-maemo-2.6.28.test.orig/block/genhd.c
++++ kernel-maemo-2.6.28.test/block/genhd.c
+@@ -98,7 +98,7 @@
+       if (flags & DISK_PITER_REVERSE)
+               piter->idx = ptbl->len - 1;
+-      else if (flags & DISK_PITER_INCL_PART0)
++      else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
+               piter->idx = 0;
+       else
+               piter->idx = 1;
+@@ -134,7 +134,8 @@
+       /* determine iteration parameters */
+       if (piter->flags & DISK_PITER_REVERSE) {
+               inc = -1;
+-              if (piter->flags & DISK_PITER_INCL_PART0)
++              if (piter->flags & (DISK_PITER_INCL_PART0 |
++                                  DISK_PITER_INCL_EMPTY_PART0))
+                       end = -1;
+               else
+                       end = 0;
+@@ -150,7 +151,10 @@
+               part = rcu_dereference(ptbl->part[piter->idx]);
+               if (!part)
+                       continue;
+-              if (!(piter->flags & DISK_PITER_INCL_EMPTY) && !part->nr_sects)
++              if (!part->nr_sects &&
++                  !(piter->flags & DISK_PITER_INCL_EMPTY) &&
++                  !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
++                    piter->idx == 0))
+                       continue;
+               get_device(part_to_dev(part));
+@@ -994,7 +998,7 @@
+                               "\n\n");
+       */
+  
+-      disk_part_iter_init(&piter, gp, DISK_PITER_INCL_PART0);
++      disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
+       while ((hd = disk_part_iter_next(&piter))) {
+               cpu = part_stat_lock();
+               part_round_stats(cpu, hd);
+--- kernel-maemo-2.6.28.test.orig/crypto/algapi.c
++++ kernel-maemo-2.6.28.test/crypto/algapi.c
+@@ -149,6 +149,9 @@
+               if (q == alg)
+                       goto err;
++              if (crypto_is_moribund(q))
++                      continue;
++
+               if (crypto_is_larval(q)) {
+                       if (!strcmp(alg->cra_driver_name, q->cra_driver_name))
+                               goto err;
+@@ -197,7 +200,7 @@
+       down_write(&crypto_alg_sem);
+       list_for_each_entry(q, &crypto_alg_list, cra_list) {
+-              if (!crypto_is_larval(q))
++              if (crypto_is_moribund(q) || !crypto_is_larval(q))
+                       continue;
+               test = (struct crypto_larval *)q;
+@@ -210,6 +213,7 @@
+       goto unlock;
+ found:
++      q->cra_flags |= CRYPTO_ALG_DEAD;
+       alg = test->adult;
+       if (err || list_empty(&alg->cra_list))
+               goto complete;
+--- kernel-maemo-2.6.28.test.orig/crypto/async_tx/async_tx.c
++++ kernel-maemo-2.6.28.test/crypto/async_tx/async_tx.c
+@@ -124,6 +124,8 @@
+       if (!dep)
+               return;
++      /* we'll submit tx->next now, so clear the link */
++      tx->next = NULL;
+       chan = dep->chan;
+       /* keep submitting up until a channel switch is detected
+--- kernel-maemo-2.6.28.test.orig/crypto/authenc.c
++++ kernel-maemo-2.6.28.test/crypto/authenc.c
+@@ -157,16 +157,19 @@
+       dstp = sg_page(dst);
+       vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
+-      sg_init_table(cipher, 2);
+-      sg_set_buf(cipher, iv, ivsize);
+-      authenc_chain(cipher, dst, vdst == iv + ivsize);
++      if (ivsize) {
++              sg_init_table(cipher, 2);
++              sg_set_buf(cipher, iv, ivsize);
++              authenc_chain(cipher, dst, vdst == iv + ivsize);
++              dst = cipher;
++      }
+       cryptlen = req->cryptlen + ivsize;
+-      hash = crypto_authenc_hash(req, flags, cipher, cryptlen);
++      hash = crypto_authenc_hash(req, flags, dst, cryptlen);
+       if (IS_ERR(hash))
+               return PTR_ERR(hash);
+-      scatterwalk_map_and_copy(hash, cipher, cryptlen,
++      scatterwalk_map_and_copy(hash, dst, cryptlen,
+                                crypto_aead_authsize(authenc), 1);
+       return 0;
+ }
+@@ -284,11 +287,14 @@
+       srcp = sg_page(src);
+       vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
+-      sg_init_table(cipher, 2);
+-      sg_set_buf(cipher, iv, ivsize);
+-      authenc_chain(cipher, src, vsrc == iv + ivsize);
++      if (ivsize) {
++              sg_init_table(cipher, 2);
++              sg_set_buf(cipher, iv, ivsize);
++              authenc_chain(cipher, src, vsrc == iv + ivsize);
++              src = cipher;
++      }
+-      return crypto_authenc_verify(req, cipher, cryptlen + ivsize);
++      return crypto_authenc_verify(req, src, cryptlen + ivsize);
+ }
+ static int crypto_authenc_decrypt(struct aead_request *req)
+--- kernel-maemo-2.6.28.test.orig/crypto/ccm.c
++++ kernel-maemo-2.6.28.test/crypto/ccm.c
+@@ -266,6 +266,8 @@
+       if (assoclen) {
+               pctx->ilen = format_adata(idata, assoclen);
+               get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
++      } else {
++              pctx->ilen = 0;
+       }
+       /* compute plaintext into mac */
+--- kernel-maemo-2.6.28.test.orig/drivers/acpi/asus_acpi.c
++++ kernel-maemo-2.6.28.test/drivers/acpi/asus_acpi.c
+@@ -143,6 +143,7 @@
+                                                        S1300N, S5200N*/
+               A4S,            /* Z81sp */
+               F3Sa,           /* (Centrino) */
++              R1F,
+               END_MODEL
+       } model;                /* Models currently supported */
+       u16 event_count[128];   /* Count for each event TODO make this better */
+@@ -420,7 +421,18 @@
+               .display_get    = "\\ADVG",
+               .display_set    = "SDSP",
+       },
+-
++      {
++              .name = "R1F",
++              .mt_bt_switch = "BLED",
++              .mt_mled = "MLED",
++              .mt_wled = "WLED",
++              .mt_lcd_switch = "\\Q10",
++              .lcd_status = "\\GP06",
++              .brightness_set = "SPLV",
++              .brightness_get = "GPLV",
++              .display_set = "SDSP",
++              .display_get = "\\INFB"
++      }
+ };
+ /* procdir we use */
+@@ -1165,6 +1177,8 @@
+               return W3V;
+       else if (strncmp(model, "W5A", 3) == 0)
+               return W5A;
++      else if (strncmp(model, "R1F", 3) == 0)
++              return R1F;
+       else if (strncmp(model, "A4S", 3) == 0)
+               return A4S;
+       else if (strncmp(model, "F3Sa", 4) == 0)
+--- kernel-maemo-2.6.28.test.orig/drivers/acpi/battery.c
++++ kernel-maemo-2.6.28.test/drivers/acpi/battery.c
+@@ -471,7 +471,7 @@
+ static int acpi_battery_update(struct acpi_battery *battery)
+ {
+-      int result;
++      int result, old_present = acpi_battery_present(battery);
+       result = acpi_battery_get_status(battery);
+       if (result)
+               return result;
+@@ -482,7 +482,8 @@
+               return 0;
+       }
+ #endif
+-      if (!battery->update_time) {
++      if (!battery->update_time ||
++          old_present != acpi_battery_present(battery)) {
+               result = acpi_battery_get_info(battery);
+               if (result)
+                       return result;
+--- kernel-maemo-2.6.28.test.orig/drivers/acpi/dock.c
++++ kernel-maemo-2.6.28.test/drivers/acpi/dock.c
+@@ -855,10 +855,14 @@
+ static ssize_t show_docked(struct device *dev,
+                          struct device_attribute *attr, char *buf)
+ {
++      struct acpi_device *tmp;
++
+       struct dock_station *dock_station = *((struct dock_station **)
+               dev->platform_data);
+-      return snprintf(buf, PAGE_SIZE, "%d\n", dock_present(dock_station));
++      if (ACPI_SUCCESS(acpi_bus_get_device(dock_station->handle, &tmp)))
++              return snprintf(buf, PAGE_SIZE, "1\n");
++      return snprintf(buf, PAGE_SIZE, "0\n");
+ }
+ static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
+@@ -1142,9 +1146,10 @@
+ static void __exit dock_exit(void)
+ {
+       struct dock_station *dock_station;
++      struct dock_station *tmp;
+       unregister_acpi_bus_notifier(&dock_acpi_notifier);
+-      list_for_each_entry(dock_station, &dock_stations, sibiling)
++      list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibiling)
+               dock_remove(dock_station);
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/acpi/ec.c
++++ kernel-maemo-2.6.28.test/drivers/acpi/ec.c
+@@ -759,9 +759,10 @@
+       struct acpi_namespace_node *node = handle;
+       struct acpi_ec *ec = context;
+       int value = 0;
+-      if (sscanf(node->name.ascii, "_Q%x", &value) == 1) {
++
++      if (sscanf(node->name.ascii, "_Q%2x", &value) == 1)
+               acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
+-      }
++
+       return AE_OK;
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/acpi/namespace/nsutils.c
++++ kernel-maemo-2.6.28.test/drivers/acpi/namespace/nsutils.c
+@@ -314,9 +314,15 @@
+        *
+        * strlen() + 1 covers the first name_seg, which has no path separator
+        */
+-      if (acpi_ns_valid_root_prefix(next_external_char[0])) {
++      if (acpi_ns_valid_root_prefix(*next_external_char)) {
+               info->fully_qualified = TRUE;
+               next_external_char++;
++
++              /* Skip redundant root_prefix, like \\_SB.PCI0.SBRG.EC0 */
++
++              while (acpi_ns_valid_root_prefix(*next_external_char)) {
++                      next_external_char++;
++              }
+       } else {
+               /*
+                * Handle Carat prefixes
+--- kernel-maemo-2.6.28.test.orig/drivers/acpi/pci_link.c
++++ kernel-maemo-2.6.28.test/drivers/acpi/pci_link.c
+@@ -796,10 +796,6 @@
+       struct list_head *node = NULL;
+       struct acpi_pci_link *link = NULL;
+-
+-      /* Make sure SCI is enabled again (Apple firmware bug?) */
+-      acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1);
+-
+       list_for_each(node, &acpi_link.entries) {
+               link = list_entry(node, struct acpi_pci_link, node);
+               if (!link) {
+--- kernel-maemo-2.6.28.test.orig/drivers/acpi/power.c
++++ kernel-maemo-2.6.28.test/drivers/acpi/power.c
+@@ -151,7 +151,7 @@
+       *state = (sta & 0x01)?ACPI_POWER_RESOURCE_STATE_ON:
+                             ACPI_POWER_RESOURCE_STATE_OFF;
+-      ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] is %s\n",
++      ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%4.4s] is %s\n",
+                         acpi_ut_get_node_name(handle),
+                               *state ? "on" : "off"));
+--- kernel-maemo-2.6.28.test.orig/drivers/acpi/tables.c
++++ kernel-maemo-2.6.28.test/drivers/acpi/tables.c
+@@ -293,7 +293,12 @@
+ int __init acpi_table_init(void)
+ {
+-      acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
++      acpi_status status;
++
++      status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
++      if (ACPI_FAILURE(status))
++              return 1;
++
+       check_multiple_madt();
+       return 0;
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/acpi/tables/tbutils.c
++++ kernel-maemo-2.6.28.test/drivers/acpi/tables/tbutils.c
+@@ -512,10 +512,9 @@
+                       if (ACPI_FAILURE(status)) {
+                               ACPI_WARNING((AE_INFO,
+                                             "Truncating %u table entries!",
+-                                            (unsigned)
+-                                            (acpi_gbl_root_table_list.size -
+-                                             acpi_gbl_root_table_list.
+-                                             count)));
++                                            (unsigned) (table_count -
++                                             (acpi_gbl_root_table_list.
++                                             count - 2))));
+                               break;
+                       }
+               }
+--- kernel-maemo-2.6.28.test.orig/drivers/acpi/video.c
++++ kernel-maemo-2.6.28.test/drivers/acpi/video.c
+@@ -36,6 +36,7 @@
+ #include <linux/backlight.h>
+ #include <linux/thermal.h>
+ #include <linux/video_output.h>
++#include <linux/sort.h>
+ #include <asm/uaccess.h>
+ #include <acpi/acpi_bus.h>
+@@ -481,6 +482,7 @@
+       int status = AE_OK;
+       union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+       struct acpi_object_list args = { 1, &arg0 };
++      int state;
+       arg0.integer.value = level;
+@@ -489,6 +491,10 @@
+               status = acpi_evaluate_object(device->dev->handle, "_BCM",
+                                             &args, NULL);
+       device->brightness->curr = level;
++      for (state = 2; state < device->brightness->count; state++)
++              if (level == device->brightness->levels[state])
++                      device->backlight->props.brightness = state - 2;
++
+       return status;
+ }
+@@ -626,6 +632,16 @@
+ }
+ /*
++ * Simple comparison function used to sort backlight levels.
++ */
++
++static int
++acpi_video_cmp_level(const void *a, const void *b)
++{
++      return *(int *)a - *(int *)b;
++}
++
++/*
+  *  Arg:      
+  *    device  : video output device (LCD, CRT, ..)
+  *
+@@ -676,6 +692,10 @@
+               count++;
+       }
++      /* don't sort the first two brightness levels */
++      sort(&br->levels[2], count - 2, sizeof(br->levels[2]),
++              acpi_video_cmp_level, NULL);
++
+       if (count < 2)
+               goto out_free_levels;
+@@ -1000,7 +1020,7 @@
+       }
+       seq_printf(seq, "levels: ");
+-      for (i = 0; i < dev->brightness->count; i++)
++      for (i = 2; i < dev->brightness->count; i++)
+               seq_printf(seq, " %d", dev->brightness->levels[i]);
+       seq_printf(seq, "\ncurrent: %d\n", dev->brightness->curr);
+@@ -1039,7 +1059,7 @@
+               return -EFAULT;
+       /* validate through the list of available levels */
+-      for (i = 0; i < dev->brightness->count; i++)
++      for (i = 2; i < dev->brightness->count; i++)
+               if (level == dev->brightness->levels[i]) {
+                       if (ACPI_SUCCESS
+                           (acpi_video_device_lcd_set_level(dev, level)))
+@@ -1692,7 +1712,7 @@
+       max = max_below = 0;
+       min = min_above = 255;
+       /* Find closest level to level_current */
+-      for (i = 0; i < device->brightness->count; i++) {
++      for (i = 2; i < device->brightness->count; i++) {
+               l = device->brightness->levels[i];
+               if (abs(l - level_current) < abs(delta)) {
+                       delta = l - level_current;
+@@ -1702,7 +1722,7 @@
+       }
+       /* Ajust level_current to closest available level */
+       level_current += delta;
+-      for (i = 0; i < device->brightness->count; i++) {
++      for (i = 2; i < device->brightness->count; i++) {
+               l = device->brightness->levels[i];
+               if (l < min)
+                       min = l;
+@@ -1986,6 +2006,12 @@
+                       device->pnp.bus_id[3] = '0' + instance;
+               instance ++;
+       }
++      /* a hack to fix the duplicate name "VGA" problem on Pa 3553 */
++      if (!strcmp(device->pnp.bus_id, "VGA")) {
++              if (instance)
++                      device->pnp.bus_id[3] = '0' + instance;
++              instance++;
++      }
+       video->device = device;
+       strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME);
+--- kernel-maemo-2.6.28.test.orig/drivers/ata/ata_piix.c
++++ kernel-maemo-2.6.28.test/drivers/ata/ata_piix.c
+@@ -1294,6 +1294,39 @@
+       return map;
+ }
++static bool piix_no_sidpr(struct ata_host *host)
++{
++      struct pci_dev *pdev = to_pci_dev(host->dev);
++
++      /*
++       * Samsung DB-P70 only has three ATA ports exposed and
++       * curiously the unconnected first port reports link online
++       * while not responding to SRST protocol causing excessive
++       * detection delay.
++       *
++       * Unfortunately, the system doesn't carry enough DMI
++       * information to identify the machine but does have subsystem
++       * vendor and device set.  As it's unclear whether the
++       * subsystem vendor/device is used only for this specific
++       * board, the port can't be disabled solely with the
++       * information; however, turning off SIDPR access works around
++       * the problem.  Turn it off.
++       *
++       * This problem is reported in bnc#441240.
++       *
++       * https://bugzilla.novell.com/show_bug.cgi?id=441420
++       */
++      if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2920 &&
++          pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
++          pdev->subsystem_device == 0xb049) {
++              dev_printk(KERN_WARNING, host->dev,
++                         "Samsung DB-P70 detected, disabling SIDPR\n");
++              return true;
++      }
++
++      return false;
++}
++
+ static int __devinit piix_init_sidpr(struct ata_host *host)
+ {
+       struct pci_dev *pdev = to_pci_dev(host->dev);
+@@ -1307,6 +1340,10 @@
+               if (hpriv->map[i] == IDE)
+                       return 0;
++      /* is it blacklisted? */
++      if (piix_no_sidpr(host))
++              return 0;
++
+       if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR))
+               return 0;
+--- kernel-maemo-2.6.28.test.orig/drivers/ata/libata-core.c
++++ kernel-maemo-2.6.28.test/drivers/ata/libata-core.c
+@@ -1251,14 +1251,16 @@
+ {
+       if (ata_id_has_lba(id)) {
+               if (ata_id_has_lba48(id))
+-                      return ata_id_u64(id, 100);
++                      return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
+               else
+-                      return ata_id_u32(id, 60);
++                      return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
+       } else {
+               if (ata_id_current_chs_valid(id))
+-                      return ata_id_u32(id, 57);
++                      return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
++                             id[ATA_ID_CUR_SECTORS];
+               else
+-                      return id[1] * id[3] * id[6];
++                      return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
++                             id[ATA_ID_SECTORS];
+       }
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/ata/libata-eh.c
++++ kernel-maemo-2.6.28.test/drivers/ata/libata-eh.c
+@@ -2366,11 +2366,14 @@
+               }
+               /* prereset() might have cleared ATA_EH_RESET.  If so,
+-               * bang classes and return.
++               * bang classes, thaw and return.
+                */
+               if (reset && !(ehc->i.action & ATA_EH_RESET)) {
+                       ata_link_for_each_dev(dev, link)
+                               classes[dev->devno] = ATA_DEV_NONE;
++                      if ((ap->pflags & ATA_PFLAG_FROZEN) &&
++                          ata_is_host_link(link))
++                              ata_eh_thaw_port(ap);
+                       rc = 0;
+                       goto out;
+               }
+@@ -2959,12 +2962,13 @@
+               /* give it just one more chance */
+               ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
+       case -EIO:
+-              if (ehc->tries[dev->devno] == 1 && dev->pio_mode > XFER_PIO_0) {
++              if (ehc->tries[dev->devno] == 1) {
+                       /* This is the last chance, better to slow
+                        * down than lose it.
+                        */
+                       sata_down_spd_limit(ata_dev_phys_link(dev));
+-                      ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
++                      if (dev->pio_mode > XFER_PIO_0)
++                              ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+               }
+       }
+--- kernel-maemo-2.6.28.test.orig/drivers/ata/pata_hpt37x.c
++++ kernel-maemo-2.6.28.test/drivers/ata/pata_hpt37x.c
+@@ -8,7 +8,7 @@
+  * Copyright (C) 1999-2003            Andre Hedrick <andre@linux-ide.org>
+  * Portions Copyright (C) 2001                Sun Microsystems, Inc.
+  * Portions Copyright (C) 2003                Red Hat Inc
+- * Portions Copyright (C) 2005-2007   MontaVista Software, Inc.
++ * Portions Copyright (C) 2005-2009   MontaVista Software, Inc.
+  *
+  * TODO
+  *    Look into engine reset on timeout errors. Should not be required.
+@@ -24,7 +24,7 @@
+ #include <linux/libata.h>
+ #define DRV_NAME      "pata_hpt37x"
+-#define DRV_VERSION   "0.6.11"
++#define DRV_VERSION   "0.6.12"
+ struct hpt_clock {
+       u8      xfer_speed;
+@@ -445,23 +445,6 @@
+ }
+ /**
+- *    hpt370_bmdma_start              -       DMA engine begin
+- *    @qc: ATA command
+- *
+- *    The 370 and 370A want us to reset the DMA engine each time we
+- *    use it. The 372 and later are fine.
+- */
+-
+-static void hpt370_bmdma_start(struct ata_queued_cmd *qc)
+-{
+-      struct ata_port *ap = qc->ap;
+-      struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+-      pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+-      udelay(10);
+-      ata_bmdma_start(qc);
+-}
+-
+-/**
+  *    hpt370_bmdma_end                -       DMA engine stop
+  *    @qc: ATA command
+  *
+@@ -598,7 +581,6 @@
+ static struct ata_port_operations hpt370_port_ops = {
+       .inherits       = &ata_bmdma_port_ops,
+-      .bmdma_start    = hpt370_bmdma_start,
+       .bmdma_stop     = hpt370_bmdma_stop,
+       .mode_filter    = hpt370_filter,
+--- kernel-maemo-2.6.28.test.orig/drivers/ata/pata_via.c
++++ kernel-maemo-2.6.28.test/drivers/ata/pata_via.c
+@@ -86,6 +86,10 @@
+       VIA_SATA_PATA   = 0x800, /* SATA/PATA combined configuration */
+ };
++enum {
++      VIA_IDFLAG_SINGLE = (1 << 0), /* single channel controller */
++};
++
+ /*
+  * VIA SouthBridge chips.
+  */
+@@ -97,12 +101,17 @@
+       u8 rev_max;
+       u16 flags;
+ } via_isa_bridges[] = {
++      { "vx855",      PCI_DEVICE_ID_VIA_VX855,    0x00, 0x2f,
++        VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA },
+       { "vx800",      PCI_DEVICE_ID_VIA_VX800,    0x00, 0x2f, VIA_UDMA_133 |
+       VIA_BAD_AST | VIA_SATA_PATA },
++      { "vt8261",     PCI_DEVICE_ID_VIA_8261,     0x00, 0x2f,
++        VIA_UDMA_133 | VIA_BAD_AST },
+       { "vt8237s",    PCI_DEVICE_ID_VIA_8237S,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+       { "vt8251",     PCI_DEVICE_ID_VIA_8251,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+       { "cx700",      PCI_DEVICE_ID_VIA_CX700,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA },
+-      { "vt6410",     PCI_DEVICE_ID_VIA_6410,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES},
++      { "vt6410",     PCI_DEVICE_ID_VIA_6410,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES },
++      { "vt6415",     PCI_DEVICE_ID_VIA_6415,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES },
+       { "vt8237a",    PCI_DEVICE_ID_VIA_8237A,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+       { "vt8237",     PCI_DEVICE_ID_VIA_8237,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+       { "vt8235",     PCI_DEVICE_ID_VIA_8235,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+@@ -122,6 +131,8 @@
+       { "vt82c586",   PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, VIA_UDMA_NONE | VIA_SET_FIFO },
+       { "vt82c576",   PCI_DEVICE_ID_VIA_82C576,   0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK },
+       { "vt82c576",   PCI_DEVICE_ID_VIA_82C576,   0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
++      { "vtxxxx",     PCI_DEVICE_ID_VIA_ANON,    0x00, 0x2f,
++        VIA_UDMA_133 | VIA_BAD_AST },
+       { NULL }
+ };
+@@ -460,6 +471,7 @@
+       static int printed_version;
+       u8 enable;
+       u32 timing;
++      unsigned long flags = id->driver_data;
+       int rc;
+       if (!printed_version++)
+@@ -469,9 +481,13 @@
+       if (rc)
+               return rc;
++      if (flags & VIA_IDFLAG_SINGLE)
++              ppi[1] = &ata_dummy_port_info;
++
+       /* To find out how the IDE will behave and what features we
+          actually have to look at the bridge not the IDE controller */
+-      for (config = via_isa_bridges; config->id; config++)
++      for (config = via_isa_bridges; config->id != PCI_DEVICE_ID_VIA_ANON;
++           config++)
+               if ((isa = pci_get_device(PCI_VENDOR_ID_VIA +
+                       !!(config->flags & VIA_BAD_ID),
+                       config->id, NULL))) {
+@@ -482,10 +498,6 @@
+                       pci_dev_put(isa);
+               }
+-      if (!config->id) {
+-              printk(KERN_WARNING "via: Unknown VIA SouthBridge, disabling.\n");
+-              return -ENODEV;
+-      }
+       pci_dev_put(isa);
+       if (!(config->flags & VIA_NO_ENABLES)) {
+@@ -582,11 +594,13 @@
+ #endif
+ static const struct pci_device_id via[] = {
++      { PCI_VDEVICE(VIA, 0x0415), },
+       { PCI_VDEVICE(VIA, 0x0571), },
+       { PCI_VDEVICE(VIA, 0x0581), },
+       { PCI_VDEVICE(VIA, 0x1571), },
+       { PCI_VDEVICE(VIA, 0x3164), },
+       { PCI_VDEVICE(VIA, 0x5324), },
++      { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE },
+       { },
+ };
+--- kernel-maemo-2.6.28.test.orig/drivers/ata/sata_mv.c
++++ kernel-maemo-2.6.28.test/drivers/ata/sata_mv.c
+@@ -669,8 +669,8 @@
+       { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
+       /* RocketRAID 1720/174x have different identifiers */
+       { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
+-      { PCI_VDEVICE(TTI, 0x1740), chip_508x },
+-      { PCI_VDEVICE(TTI, 0x1742), chip_508x },
++      { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
++      { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
+       { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
+       { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
+@@ -883,7 +883,7 @@
+               struct mv_host_priv *hpriv = ap->host->private_data;
+               int hardport = mv_hardport_from_port(ap->port_no);
+               void __iomem *hc_mmio = mv_hc_base_from_port(
+-                                      mv_host_base(ap->host), hardport);
++                                      mv_host_base(ap->host), ap->port_no);
+               u32 hc_irq_cause, ipending;
+               /* clear EDMA event indicators, if any */
+--- kernel-maemo-2.6.28.test.orig/drivers/ata/sata_nv.c
++++ kernel-maemo-2.6.28.test/drivers/ata/sata_nv.c
+@@ -305,10 +305,10 @@
+ static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+ static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
++static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
++                                 unsigned long deadline);
+ static void nv_nf2_freeze(struct ata_port *ap);
+ static void nv_nf2_thaw(struct ata_port *ap);
+-static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
+-                          unsigned long deadline);
+ static void nv_ck804_freeze(struct ata_port *ap);
+ static void nv_ck804_thaw(struct ata_port *ap);
+ static int nv_adma_slave_config(struct scsi_device *sdev);
+@@ -352,6 +352,7 @@
+       NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
+       CK804,
+       ADMA,
++      MCP5x,
+       SWNCQ,
+ };
+@@ -363,10 +364,10 @@
+       { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
+       { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
+       { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
+-      { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
+-      { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
+-      { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
+-      { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
++      { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
++      { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
++      { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
++      { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
+       { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
+       { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
+       { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
+@@ -420,26 +421,33 @@
+       .hardreset              = ATA_OP_NULL,
+ };
+-/* OSDL bz3352 reports that nf2/3 controllers can't determine device
+- * signature reliably.  Also, the following thread reports detection
+- * failure on cold boot with the standard debouncing timing.
++/* nf2 is rife with hardreset related problems.
++ *
++ * kernel bz#3352 reports nf2/3 controllers can't determine device
++ * signature reliably.  The following thread reports detection failure
++ * on cold boot with the standard debouncing timing.
+  *
+  * http://thread.gmane.org/gmane.linux.ide/34098
+  *
+- * Debounce with hotplug timing and request follow-up SRST.
++ * And bz#12176 reports that hardreset simply doesn't work on nf2.
++ * Give up on it and just don't do hardreset.
+  */
+ static struct ata_port_operations nv_nf2_ops = {
+-      .inherits               = &nv_common_ops,
++      .inherits               = &nv_generic_ops,
+       .freeze                 = nv_nf2_freeze,
+       .thaw                   = nv_nf2_thaw,
+-      .hardreset              = nv_nf2_hardreset,
+ };
+-/* CK804 finally gets hardreset right */
++/* For initial probing after boot and hot plugging, hardreset mostly
++ * works fine on CK804 but curiously, reprobing on the initial port by
++ * rescanning or rmmod/insmod fails to acquire the initial D2H Reg FIS
++ * in a somewhat nondeterministic way.  Use noclassify hardreset.
++ */
+ static struct ata_port_operations nv_ck804_ops = {
+       .inherits               = &nv_common_ops,
+       .freeze                 = nv_ck804_freeze,
+       .thaw                   = nv_ck804_thaw,
++      .hardreset              = nv_noclassify_hardreset,
+       .host_stop              = nv_ck804_host_stop,
+ };
+@@ -467,8 +475,19 @@
+       .host_stop              = nv_adma_host_stop,
+ };
++/* Kernel bz#12351 reports that when SWNCQ is enabled, for hotplug to
++ * work, hardreset should be used and hardreset can't report proper
++ * signature, which suggests that mcp5x is closer to nf2 as far as
++ * reset quirkiness is concerned.  Define separate ops for mcp5x with
++ * nv_noclassify_hardreset().
++ */
++static struct ata_port_operations nv_mcp5x_ops = {
++      .inherits               = &nv_common_ops,
++      .hardreset              = nv_noclassify_hardreset,
++};
++
+ static struct ata_port_operations nv_swncq_ops = {
+-      .inherits               = &nv_generic_ops,
++      .inherits               = &nv_mcp5x_ops,
+       .qc_defer               = ata_std_qc_defer,
+       .qc_prep                = nv_swncq_qc_prep,
+@@ -531,6 +550,15 @@
+               .port_ops       = &nv_adma_ops,
+               .private_data   = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
+       },
++      /* MCP5x */
++      {
++              .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
++              .pio_mask       = NV_PIO_MASK,
++              .mwdma_mask     = NV_MWDMA_MASK,
++              .udma_mask      = NV_UDMA_MASK,
++              .port_ops       = &nv_mcp5x_ops,
++              .private_data   = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
++      },
+       /* SWNCQ */
+       {
+               .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+@@ -1530,6 +1558,17 @@
+       return 0;
+ }
++static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
++                                 unsigned long deadline)
++{
++      bool online;
++      int rc;
++
++      rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
++                               &online, NULL);
++      return online ? -EAGAIN : rc;
++}
++
+ static void nv_nf2_freeze(struct ata_port *ap)
+ {
+       void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
+@@ -1554,17 +1593,6 @@
+       iowrite8(mask, scr_addr + NV_INT_ENABLE);
+ }
+-static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
+-                          unsigned long deadline)
+-{
+-      bool online;
+-      int rc;
+-
+-      rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
+-                               &online, NULL);
+-      return online ? -EAGAIN : rc;
+-}
+-
+ static void nv_ck804_freeze(struct ata_port *ap)
+ {
+       void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
+@@ -2355,14 +2383,9 @@
+       if (type == CK804 && adma_enabled) {
+               dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
+               type = ADMA;
+-      }
+-
+-      if (type == SWNCQ) {
+-              if (swncq_enabled)
+-                      dev_printk(KERN_NOTICE, &pdev->dev,
+-                                 "Using SWNCQ mode\n");
+-              else
+-                      type = GENERIC;
++      } else if (type == MCP5x && swncq_enabled) {
++              dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
++              type = SWNCQ;
+       }
+       ppi[0] = &nv_port_info[type];
+--- kernel-maemo-2.6.28.test.orig/drivers/ata/sata_via.c
++++ kernel-maemo-2.6.28.test/drivers/ata/sata_via.c
+@@ -92,6 +92,8 @@
+       { PCI_VDEVICE(VIA, 0x5372), vt6420 },
+       { PCI_VDEVICE(VIA, 0x7372), vt6420 },
+       { PCI_VDEVICE(VIA, 0x5287), vt8251 }, /* 2 sata chnls (Master/Slave) */
++      { PCI_VDEVICE(VIA, 0x9000), vt8251 },
++      { PCI_VDEVICE(VIA, 0x9040), vt8251 },
+       { }     /* terminate list */
+ };
+--- kernel-maemo-2.6.28.test.orig/drivers/atm/fore200e.c
++++ kernel-maemo-2.6.28.test/drivers/atm/fore200e.c
+@@ -2519,8 +2519,8 @@
+       return err;
+     sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
+-    if (request_firmware(&firmware, buf, device) == 1) {
+-      printk(FORE200E "missing %s firmware image\n", fore200e->bus->model_name);
++    if ((err = request_firmware(&firmware, buf, device)) < 0) {
++      printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
+       return err;
+     }
+--- kernel-maemo-2.6.28.test.orig/drivers/block/aoe/aoe.h
++++ kernel-maemo-2.6.28.test/drivers/block/aoe/aoe.h
+@@ -18,6 +18,7 @@
+ enum {
+       AOECMD_ATA,
+       AOECMD_CFG,
++      AOECMD_VEND_MIN = 0xf0,
+       AOEFL_RSP = (1<<3),
+       AOEFL_ERR = (1<<2),
+--- kernel-maemo-2.6.28.test.orig/drivers/block/aoe/aoenet.c
++++ kernel-maemo-2.6.28.test/drivers/block/aoe/aoenet.c
+@@ -153,6 +153,8 @@
+               aoecmd_cfg_rsp(skb);
+               break;
+       default:
++              if (h->cmd >= AOECMD_VEND_MIN)
++                      break;  /* don't complain about vendor commands */
+               printk(KERN_INFO "aoe: unknown cmd %d\n", h->cmd);
+       }
+ exit:
+--- kernel-maemo-2.6.28.test.orig/drivers/block/nbd.c
++++ kernel-maemo-2.6.28.test/drivers/block/nbd.c
+@@ -406,6 +406,7 @@
+       ret = sysfs_create_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
+       if (ret) {
+               printk(KERN_ERR "nbd: sysfs_create_file failed!");
++              lo->pid = 0;
+               return ret;
+       }
+@@ -413,6 +414,7 @@
+               nbd_end_request(req);
+       sysfs_remove_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
++      lo->pid = 0;
+       return 0;
+ }
+@@ -547,6 +549,15 @@
+               BUG_ON(lo->magic != LO_MAGIC);
++              if (unlikely(!lo->sock)) {
++                      printk(KERN_ERR "%s: Attempted send on closed socket\n",
++                              lo->disk->disk_name);
++                      req->errors++;
++                      nbd_end_request(req);
++                      spin_lock_irq(q->queue_lock);
++                      continue;
++              }
++
+               spin_lock_irq(&lo->queue_lock);
+               list_add_tail(&req->queuelist, &lo->waiting_queue);
+               spin_unlock_irq(&lo->queue_lock);
+@@ -648,6 +659,8 @@
+               set_capacity(lo->disk, lo->bytesize >> 9);
+               return 0;
+       case NBD_DO_IT:
++              if (lo->pid)
++                      return -EBUSY;
+               if (!lo->file)
+                       return -EINVAL;
+               thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
+--- kernel-maemo-2.6.28.test.orig/drivers/block/xen-blkfront.c
++++ kernel-maemo-2.6.28.test/drivers/block/xen-blkfront.c
+@@ -40,6 +40,7 @@
+ #include <linux/hdreg.h>
+ #include <linux/cdrom.h>
+ #include <linux/module.h>
++#include <linux/scatterlist.h>
+ #include <xen/xenbus.h>
+ #include <xen/grant_table.h>
+@@ -82,6 +83,7 @@
+       enum blkif_state connected;
+       int ring_ref;
+       struct blkif_front_ring ring;
++      struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+       unsigned int evtchn, irq;
+       struct request_queue *rq;
+       struct work_struct work;
+@@ -204,12 +206,11 @@
+       struct blkfront_info *info = req->rq_disk->private_data;
+       unsigned long buffer_mfn;
+       struct blkif_request *ring_req;
+-      struct req_iterator iter;
+-      struct bio_vec *bvec;
+       unsigned long id;
+       unsigned int fsect, lsect;
+-      int ref;
++      int i, ref;
+       grant_ref_t gref_head;
++      struct scatterlist *sg;
+       if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
+               return 1;
+@@ -238,12 +239,13 @@
+       if (blk_barrier_rq(req))
+               ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+-      ring_req->nr_segments = 0;
+-      rq_for_each_segment(bvec, req, iter) {
+-              BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
+-              buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
+-              fsect = bvec->bv_offset >> 9;
+-              lsect = fsect + (bvec->bv_len >> 9) - 1;
++      ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
++      BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
++
++      for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
++              buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
++              fsect = sg->offset >> 9;
++              lsect = fsect + (sg->length >> 9) - 1;
+               /* install a grant reference. */
+               ref = gnttab_claim_grant_reference(&gref_head);
+               BUG_ON(ref == -ENOSPC);
+@@ -254,16 +256,12 @@
+                               buffer_mfn,
+                               rq_data_dir(req) );
+-              info->shadow[id].frame[ring_req->nr_segments] =
+-                              mfn_to_pfn(buffer_mfn);
+-
+-              ring_req->seg[ring_req->nr_segments] =
++              info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
++              ring_req->seg[i] =
+                               (struct blkif_request_segment) {
+                                       .gref       = ref,
+                                       .first_sect = fsect,
+                                       .last_sect  = lsect };
+-
+-              ring_req->nr_segments++;
+       }
+       info->ring.req_prod_pvt++;
+@@ -628,6 +626,8 @@
+       SHARED_RING_INIT(sring);
+       FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
++      sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
++
+       err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
+       if (err < 0) {
+               free_page((unsigned long)sring);
+--- kernel-maemo-2.6.28.test.orig/drivers/bluetooth/btsdio.c
++++ kernel-maemo-2.6.28.test/drivers/bluetooth/btsdio.c
+@@ -86,6 +86,7 @@
+       err = sdio_writesb(data->func, REG_TDAT, skb->data, skb->len);
+       if (err < 0) {
++              skb_pull(skb, 4);
+               sdio_writeb(data->func, 0x01, REG_PC_WRT, NULL);
+               return err;
+       }
+--- kernel-maemo-2.6.28.test.orig/drivers/char/agp/generic.c
++++ kernel-maemo-2.6.28.test/drivers/char/agp/generic.c
+@@ -1226,7 +1226,7 @@
+       int i, ret = -ENOMEM;
+       for (i = 0; i < num_pages; i++) {
+-              page = alloc_page(GFP_KERNEL | GFP_DMA32);
++              page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+               /* agp_free_memory() needs gart address */
+               if (page == NULL)
+                       goto out;
+@@ -1257,7 +1257,7 @@
+ {
+       struct page * page;
+-      page = alloc_page(GFP_KERNEL | GFP_DMA32);
++      page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+       if (page == NULL)
+               return NULL;
+--- kernel-maemo-2.6.28.test.orig/drivers/char/agp/intel-agp.c
++++ kernel-maemo-2.6.28.test/drivers/char/agp/intel-agp.c
+@@ -40,6 +40,8 @@
+ #define PCI_DEVICE_ID_INTEL_Q45_IG          0x2E12
+ #define PCI_DEVICE_ID_INTEL_G45_HB          0x2E20
+ #define PCI_DEVICE_ID_INTEL_G45_IG          0x2E22
++#define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
++#define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
+ /* cover 915 and 945 variants */
+ #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
+@@ -63,7 +65,8 @@
+ #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
+-              agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB)
++              agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
++              agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB)
+ extern int agp_memory_reserved;
+@@ -630,13 +633,15 @@
+                       break;
+               }
+       }
+-      if (gtt_entries > 0)
++      if (gtt_entries > 0) {
+               dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
+                      gtt_entries / KB(1), local ? "local" : "stolen");
+-      else
++              gtt_entries /= KB(4);
++      } else {
+               dev_info(&agp_bridge->dev->dev,
+                      "no pre-allocated video memory detected\n");
+-      gtt_entries /= KB(4);
++              gtt_entries = 0;
++      }
+       intel_private.gtt_entries = gtt_entries;
+ }
+@@ -1196,6 +1201,7 @@
+       case PCI_DEVICE_ID_INTEL_IGD_E_HB:
+       case PCI_DEVICE_ID_INTEL_Q45_HB:
+       case PCI_DEVICE_ID_INTEL_G45_HB:
++      case PCI_DEVICE_ID_INTEL_G41_HB:
+               *gtt_offset = *gtt_size = MB(2);
+               break;
+       default:
+@@ -2156,13 +2162,15 @@
+       { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
+               NULL, &intel_g33_driver },
+       { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
+-          "Mobile Intel? GM45 Express", NULL, &intel_i965_driver },
++          "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
+       { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
+           "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
+       { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
+           "Q45/Q43", NULL, &intel_i965_driver },
+       { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
+           "G45/G43", NULL, &intel_i965_driver },
++      { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
++          "G41", NULL, &intel_i965_driver },
+       { 0, 0, 0, NULL, NULL, NULL }
+ };
+@@ -2360,6 +2368,7 @@
+       ID(PCI_DEVICE_ID_INTEL_IGD_E_HB),
+       ID(PCI_DEVICE_ID_INTEL_Q45_HB),
+       ID(PCI_DEVICE_ID_INTEL_G45_HB),
++      ID(PCI_DEVICE_ID_INTEL_G41_HB),
+       { }
+ };
+--- kernel-maemo-2.6.28.test.orig/drivers/char/raw.c
++++ kernel-maemo-2.6.28.test/drivers/char/raw.c
+@@ -90,6 +90,7 @@
+       blkdev_put(bdev, filp->f_mode);
+ out:
+       mutex_unlock(&raw_mutex);
++      unlock_kernel();
+       return err;
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/char/selection.c
++++ kernel-maemo-2.6.28.test/drivers/char/selection.c
+@@ -268,7 +268,7 @@
+       /* Allocate a new buffer before freeing the old one ... */
+       multiplier = use_unicode ? 3 : 1;  /* chars can take up to 3 bytes */
+-      bp = kmalloc((sel_end-sel_start)/2*multiplier+1, GFP_KERNEL);
++      bp = kmalloc(((sel_end-sel_start)/2+1)*multiplier, GFP_KERNEL);
+       if (!bp) {
+               printk(KERN_WARNING "selection: kmalloc() failed\n");
+               clear_selection();
+--- kernel-maemo-2.6.28.test.orig/drivers/cpuidle/governors/menu.c
++++ kernel-maemo-2.6.28.test/drivers/cpuidle/governors/menu.c
+@@ -16,12 +16,14 @@
+ #include <mach/pm.h>
+ #define BREAK_FUZZ    4       /* 4 us */
++#define PRED_HISTORY_PCT      50
+ struct menu_device {
+       int             last_state_idx;
+       unsigned int    expected_us;
+       unsigned int    predicted_us;
++      unsigned int    current_predicted_us;
+       unsigned int    last_measured_us;
+       unsigned int    elapsed_us;
+ };
+@@ -53,6 +55,12 @@
+       device_not_idle = !pm_check_idle();
++      /* Recalculate predicted_us based on prediction_history_pct */
++      data->predicted_us *= PRED_HISTORY_PCT;
++      data->predicted_us += (100 - PRED_HISTORY_PCT) *
++                              data->current_predicted_us;
++      data->predicted_us /= 100;
++
+       /* find the deepest idle state that satisfies our constraints */
+       for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
+               struct cpuidle_state *s = &dev->states[i];
+@@ -104,7 +112,7 @@
+               measured_us = -1;
+       /* Predict time until next break event */
+-      data->predicted_us = max(measured_us, data->last_measured_us);
++      data->current_predicted_us = max(measured_us, data->last_measured_us);
+       if (last_idle_us + BREAK_FUZZ <
+           data->expected_us - target->exit_latency) {
+--- kernel-maemo-2.6.28.test.orig/drivers/crypto/ixp4xx_crypto.c
++++ kernel-maemo-2.6.28.test/drivers/crypto/ixp4xx_crypto.c
+@@ -101,6 +101,7 @@
+       u32 phys_addr;
+       u32 __reserved[4];
+       struct buffer_desc *next;
++      enum dma_data_direction dir;
+ };
+ struct crypt_ctl {
+@@ -132,14 +133,10 @@
+ struct ablk_ctx {
+       struct buffer_desc *src;
+       struct buffer_desc *dst;
+-      unsigned src_nents;
+-      unsigned dst_nents;
+ };
+ struct aead_ctx {
+       struct buffer_desc *buffer;
+-      unsigned short assoc_nents;
+-      unsigned short src_nents;
+       struct scatterlist ivlist;
+       /* used when the hmac is not on one sg entry */
+       u8 *hmac_virt;
+@@ -312,7 +309,7 @@
+       }
+ }
+-static void free_buf_chain(struct buffer_desc *buf, u32 phys)
++static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
+ {
+       while (buf) {
+               struct buffer_desc *buf1;
+@@ -320,6 +317,7 @@
+               buf1 = buf->next;
+               phys1 = buf->phys_next;
++              dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
+               dma_pool_free(buffer_pool, buf, phys);
+               buf = buf1;
+               phys = phys1;
+@@ -348,7 +346,6 @@
+       struct crypt_ctl *crypt;
+       struct ixp_ctx *ctx;
+       int failed;
+-      enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+       failed = phys & 0x1 ? -EBADMSG : 0;
+       phys &= ~0x3;
+@@ -358,13 +355,8 @@
+       case CTL_FLAG_PERFORM_AEAD: {
+               struct aead_request *req = crypt->data.aead_req;
+               struct aead_ctx *req_ctx = aead_request_ctx(req);
+-              dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
+-                              DMA_TO_DEVICE);
+-              dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
+-              dma_unmap_sg(dev, req->src, req_ctx->src_nents,
+-                              DMA_BIDIRECTIONAL);
+-              free_buf_chain(req_ctx->buffer, crypt->src_buf);
++              free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
+               if (req_ctx->hmac_virt) {
+                       finish_scattered_hmac(crypt);
+               }
+@@ -374,16 +366,11 @@
+       case CTL_FLAG_PERFORM_ABLK: {
+               struct ablkcipher_request *req = crypt->data.ablk_req;
+               struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
+-              int nents;
++
+               if (req_ctx->dst) {
+-                      nents = req_ctx->dst_nents;
+-                      dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
+-                      free_buf_chain(req_ctx->dst, crypt->dst_buf);
+-                      src_direction = DMA_TO_DEVICE;
+-              }
+-              nents = req_ctx->src_nents;
+-              dma_unmap_sg(dev, req->src, nents, src_direction);
+-              free_buf_chain(req_ctx->src, crypt->src_buf);
++                      free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
++              }
++              free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+               req->base.complete(&req->base, failed);
+               break;
+       }
+@@ -748,56 +735,35 @@
+       return 0;
+ }
+-static int count_sg(struct scatterlist *sg, int nbytes)
+-{
+-      int i;
+-      for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+-              nbytes -= sg->length;
+-      return i;
+-}
+-
+-static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
+-                      unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
++static struct buffer_desc *chainup_buffers(struct device *dev,
++              struct scatterlist *sg, unsigned nbytes,
++              struct buffer_desc *buf, gfp_t flags,
++              enum dma_data_direction dir)
+ {
+-      int nents = 0;
+-
+-      while (nbytes > 0) {
++      for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) {
++              unsigned len = min(nbytes, sg->length);
+               struct buffer_desc *next_buf;
+               u32 next_buf_phys;
+-              unsigned len = min(nbytes, sg_dma_len(sg));
++              void *ptr;
+-              nents++;
+               nbytes -= len;
+-              if (!buf->phys_addr) {
+-                      buf->phys_addr = sg_dma_address(sg);
+-                      buf->buf_len = len;
+-                      buf->next = NULL;
+-                      buf->phys_next = 0;
+-                      goto next;
+-              }
+-              /* Two consecutive chunks on one page may be handled by the old
+-               * buffer descriptor, increased by the length of the new one
+-               */
+-              if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) {
+-                      buf->buf_len += len;
+-                      goto next;
+-              }
++              ptr = page_address(sg_page(sg)) + sg->offset;
+               next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
+-              if (!next_buf)
+-                      return NULL;
++              if (!next_buf) {
++                      buf = NULL;
++                      break;
++              }
++              sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
+               buf->next = next_buf;
+               buf->phys_next = next_buf_phys;
+-
+               buf = next_buf;
+-              buf->next = NULL;
+-              buf->phys_next = 0;
++
+               buf->phys_addr = sg_dma_address(sg);
+               buf->buf_len = len;
+-next:
+-              if (nbytes > 0) {
+-                      sg = sg_next(sg);
+-              }
++              buf->dir = dir;
+       }
++      buf->next = NULL;
++      buf->phys_next = 0;
+       return buf;
+ }
+@@ -858,12 +824,12 @@
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+       struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+       unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+-      int ret = -ENOMEM;
+       struct ix_sa_dir *dir;
+       struct crypt_ctl *crypt;
+-      unsigned int nbytes = req->nbytes, nents;
++      unsigned int nbytes = req->nbytes;
+       enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+       struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
++      struct buffer_desc src_hook;
+       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+                               GFP_KERNEL : GFP_ATOMIC;
+@@ -876,7 +842,7 @@
+       crypt = get_crypt_desc();
+       if (!crypt)
+-              return ret;
++              return -ENOMEM;
+       crypt->data.ablk_req = req;
+       crypt->crypto_ctx = dir->npe_ctx_phys;
+@@ -889,53 +855,41 @@
+       BUG_ON(ivsize && !req->info);
+       memcpy(crypt->iv, req->info, ivsize);
+       if (req->src != req->dst) {
++              struct buffer_desc dst_hook;
+               crypt->mode |= NPE_OP_NOT_IN_PLACE;
+-              nents = count_sg(req->dst, nbytes);
+               /* This was never tested by Intel
+                * for more than one dst buffer, I think. */
+-              BUG_ON(nents != 1);
+-              req_ctx->dst_nents = nents;
+-              dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
+-              req_ctx->dst = dma_pool_alloc(buffer_pool, flags,&crypt->dst_buf);
+-              if (!req_ctx->dst)
+-                      goto unmap_sg_dest;
+-              req_ctx->dst->phys_addr = 0;
+-              if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags))
++              BUG_ON(req->dst->length < nbytes);
++              req_ctx->dst = NULL;
++              if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
++                                      flags, DMA_FROM_DEVICE))
+                       goto free_buf_dest;
+               src_direction = DMA_TO_DEVICE;
++              req_ctx->dst = dst_hook.next;
++              crypt->dst_buf = dst_hook.phys_next;
+       } else {
+               req_ctx->dst = NULL;
+-              req_ctx->dst_nents = 0;
+       }
+-      nents = count_sg(req->src, nbytes);
+-      req_ctx->src_nents = nents;
+-      dma_map_sg(dev, req->src, nents, src_direction);
+-
+-      req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
+-      if (!req_ctx->src)
+-              goto unmap_sg_src;
+-      req_ctx->src->phys_addr = 0;
+-      if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags))
++      req_ctx->src = NULL;
++      if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
++                              flags, src_direction))
+               goto free_buf_src;
++      req_ctx->src = src_hook.next;
++      crypt->src_buf = src_hook.phys_next;
+       crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
+       qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
+       BUG_ON(qmgr_stat_overflow(SEND_QID));
+       return -EINPROGRESS;
+ free_buf_src:
+-      free_buf_chain(req_ctx->src, crypt->src_buf);
+-unmap_sg_src:
+-      dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction);
++      free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ free_buf_dest:
+       if (req->src != req->dst) {
+-              free_buf_chain(req_ctx->dst, crypt->dst_buf);
+-unmap_sg_dest:
+-              dma_unmap_sg(dev, req->src, req_ctx->dst_nents,
+-                      DMA_FROM_DEVICE);
++              free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+       }
+       crypt->ctl_flags = CTL_FLAG_UNUSED;
+-      return ret;
++      return -ENOMEM;
+ }
+ static int ablk_encrypt(struct ablkcipher_request *req)
+@@ -983,7 +937,7 @@
+                       break;
+               offset += sg->length;
+-              sg = sg_next(sg);
++              sg = scatterwalk_sg_next(sg);
+       }
+       return (start + nbytes > offset + sg->length);
+ }
+@@ -995,11 +949,10 @@
+       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+       unsigned ivsize = crypto_aead_ivsize(tfm);
+       unsigned authsize = crypto_aead_authsize(tfm);
+-      int ret = -ENOMEM;
+       struct ix_sa_dir *dir;
+       struct crypt_ctl *crypt;
+-      unsigned int cryptlen, nents;
+-      struct buffer_desc *buf;
++      unsigned int cryptlen;
++      struct buffer_desc *buf, src_hook;
+       struct aead_ctx *req_ctx = aead_request_ctx(req);
+       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+                               GFP_KERNEL : GFP_ATOMIC;
+@@ -1020,7 +973,7 @@
+       }
+       crypt = get_crypt_desc();
+       if (!crypt)
+-              return ret;
++              return -ENOMEM;
+       crypt->data.aead_req = req;
+       crypt->crypto_ctx = dir->npe_ctx_phys;
+@@ -1039,31 +992,27 @@
+               BUG(); /* -ENOTSUP because of my lazyness */
+       }
+-      req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
+-      if (!req_ctx->buffer)
+-              goto out;
+-      req_ctx->buffer->phys_addr = 0;
+       /* ASSOC data */
+-      nents = count_sg(req->assoc, req->assoclen);
+-      req_ctx->assoc_nents = nents;
+-      dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE);
+-      buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer,flags);
++      buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
++              flags, DMA_TO_DEVICE);
++      req_ctx->buffer = src_hook.next;
++      crypt->src_buf = src_hook.phys_next;
+       if (!buf)
+-              goto unmap_sg_assoc;
++              goto out;
+       /* IV */
+       sg_init_table(&req_ctx->ivlist, 1);
+       sg_set_buf(&req_ctx->ivlist, iv, ivsize);
+-      dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
+-      buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags);
++      buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
++                      DMA_BIDIRECTIONAL);
+       if (!buf)
+-              goto unmap_sg_iv;
++              goto free_chain;
+       if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
+               /* The 12 hmac bytes are scattered,
+                * we need to copy them into a safe buffer */
+               req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
+                               &crypt->icv_rev_aes);
+               if (unlikely(!req_ctx->hmac_virt))
+-                      goto unmap_sg_iv;
++                      goto free_chain;
+               if (!encrypt) {
+                       scatterwalk_map_and_copy(req_ctx->hmac_virt,
+                               req->src, cryptlen, authsize, 0);
+@@ -1073,33 +1022,28 @@
+               req_ctx->hmac_virt = NULL;
+       }
+       /* Crypt */
+-      nents = count_sg(req->src, cryptlen + authsize);
+-      req_ctx->src_nents = nents;
+-      dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
+-      buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags);
++      buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
++                      DMA_BIDIRECTIONAL);
+       if (!buf)
+-              goto unmap_sg_src;
++              goto free_hmac_virt;
+       if (!req_ctx->hmac_virt) {
+               crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
+       }
++
+       crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
+       qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
+       BUG_ON(qmgr_stat_overflow(SEND_QID));
+       return -EINPROGRESS;
+-unmap_sg_src:
+-      dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL);
++free_hmac_virt:
+       if (req_ctx->hmac_virt) {
+               dma_pool_free(buffer_pool, req_ctx->hmac_virt,
+                               crypt->icv_rev_aes);
+       }
+-unmap_sg_iv:
+-      dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
+-unmap_sg_assoc:
+-      dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE);
+-      free_buf_chain(req_ctx->buffer, crypt->src_buf);
++free_chain:
++      free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
+ out:
+       crypt->ctl_flags = CTL_FLAG_UNUSED;
+-      return ret;
++      return -ENOMEM;
+ }
+ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
+--- kernel-maemo-2.6.28.test.orig/drivers/dma/ioat_dma.c
++++ kernel-maemo-2.6.28.test/drivers/dma/ioat_dma.c
+@@ -1341,12 +1341,11 @@
+  */
+ #define IOAT_TEST_SIZE 2000
+-DECLARE_COMPLETION(test_completion);
+ static void ioat_dma_test_callback(void *dma_async_param)
+ {
+-      printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
+-              dma_async_param);
+-      complete(&test_completion);
++      struct completion *cmp = dma_async_param;
++
++      complete(cmp);
+ }
+ /**
+@@ -1363,6 +1362,7 @@
+       dma_addr_t dma_dest, dma_src;
+       dma_cookie_t cookie;
+       int err = 0;
++      struct completion cmp;
+       src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+       if (!src)
+@@ -1402,8 +1402,9 @@
+       }
+       async_tx_ack(tx);
++      init_completion(&cmp);
+       tx->callback = ioat_dma_test_callback;
+-      tx->callback_param = (void *)0x8086;
++      tx->callback_param = &cmp;
+       cookie = tx->tx_submit(tx);
+       if (cookie < 0) {
+               dev_err(&device->pdev->dev,
+@@ -1413,7 +1414,7 @@
+       }
+       device->common.device_issue_pending(dma_chan);
+-      wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000));
++      wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+       if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+                                       != DMA_SUCCESS) {
+--- kernel-maemo-2.6.28.test.orig/drivers/firewire/fw-ohci.c
++++ kernel-maemo-2.6.28.test/drivers/firewire/fw-ohci.c
+@@ -226,7 +226,7 @@
+ #define CONTEXT_DEAD  0x0800
+ #define CONTEXT_ACTIVE        0x0400
+-#define OHCI1394_MAX_AT_REQ_RETRIES   0x2
++#define OHCI1394_MAX_AT_REQ_RETRIES   0xf
+ #define OHCI1394_MAX_AT_RESP_RETRIES  0x2
+ #define OHCI1394_MAX_PHYS_RESP_RETRIES        0x8
+--- kernel-maemo-2.6.28.test.orig/drivers/firewire/fw-sbp2.c
++++ kernel-maemo-2.6.28.test/drivers/firewire/fw-sbp2.c
+@@ -357,15 +357,17 @@
+               .model                  = ~0,
+               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS,
+       },
+-
+       /*
+-       * There are iPods (2nd gen, 3rd gen) with model_id == 0, but
+-       * these iPods do not feature the read_capacity bug according
+-       * to one report.  Read_capacity behaviour as well as model_id
+-       * could change due to Apple-supplied firmware updates though.
++       * iPod 2nd generation: needs 128k max transfer size workaround
++       * iPod 3rd generation: needs fix capacity workaround
+        */
+-
+-      /* iPod 4th generation. */ {
++      {
++              .firmware_revision      = 0x0a2700,
++              .model                  = 0x000000,
++              .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS |
++                                        SBP2_WORKAROUND_FIX_CAPACITY,
++      },
++      /* iPod 4th generation */ {
+               .firmware_revision      = 0x0a2700,
+               .model                  = 0x000021,
+               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
+@@ -1282,6 +1284,19 @@
+       .id_table = sbp2_id_table,
+ };
++static void sbp2_unmap_scatterlist(struct device *card_device,
++                                 struct sbp2_command_orb *orb)
++{
++      if (scsi_sg_count(orb->cmd))
++              dma_unmap_sg(card_device, scsi_sglist(orb->cmd),
++                           scsi_sg_count(orb->cmd),
++                           orb->cmd->sc_data_direction);
++
++      if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT))
++              dma_unmap_single(card_device, orb->page_table_bus,
++                               sizeof(orb->page_table), DMA_TO_DEVICE);
++}
++
+ static unsigned int
+ sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
+ {
+@@ -1361,15 +1376,7 @@
+       dma_unmap_single(device->card->device, orb->base.request_bus,
+                        sizeof(orb->request), DMA_TO_DEVICE);
+-
+-      if (scsi_sg_count(orb->cmd) > 0)
+-              dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
+-                           scsi_sg_count(orb->cmd),
+-                           orb->cmd->sc_data_direction);
+-
+-      if (orb->page_table_bus != 0)
+-              dma_unmap_single(device->card->device, orb->page_table_bus,
+-                               sizeof(orb->page_table), DMA_TO_DEVICE);
++      sbp2_unmap_scatterlist(device->card->device, orb);
+       orb->cmd->result = result;
+       orb->done(orb->cmd);
+@@ -1500,8 +1507,10 @@
+       orb->base.request_bus =
+               dma_map_single(device->card->device, &orb->request,
+                              sizeof(orb->request), DMA_TO_DEVICE);
+-      if (dma_mapping_error(device->card->device, orb->base.request_bus))
++      if (dma_mapping_error(device->card->device, orb->base.request_bus)) {
++              sbp2_unmap_scatterlist(device->card->device, orb);
+               goto out;
++      }
+       sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation,
+                     lu->command_block_agent_address + SBP2_ORB_POINTER);
+--- kernel-maemo-2.6.28.test.orig/drivers/firmware/dell_rbu.c
++++ kernel-maemo-2.6.28.test/drivers/firmware/dell_rbu.c
+@@ -576,7 +576,7 @@
+ {
+       int size = 0;
+       if (!pos)
+-              size = sprintf(buffer, "%s\n", image_type);
++              size = scnprintf(buffer, count, "%s\n", image_type);
+       return size;
+ }
+@@ -648,7 +648,7 @@
+       int size = 0;
+       if (!pos) {
+               spin_lock(&rbu_data.lock);
+-              size = sprintf(buffer, "%lu\n", rbu_data.packetsize);
++              size = scnprintf(buffer, count, "%lu\n", rbu_data.packetsize);
+               spin_unlock(&rbu_data.lock);
+       }
+       return size;
+--- kernel-maemo-2.6.28.test.orig/drivers/gpu/drm/drm_agpsupport.c
++++ kernel-maemo-2.6.28.test/drivers/gpu/drm/drm_agpsupport.c
+@@ -33,10 +33,11 @@
+ #include "drmP.h"
+ #include <linux/module.h>
+-#include <asm/agp.h>
+ #if __OS_HAS_AGP
++#include <asm/agp.h>
++
+ /**
+  * Get AGP information.
+  *
+--- kernel-maemo-2.6.28.test.orig/drivers/gpu/drm/drm_irq.c
++++ kernel-maemo-2.6.28.test/drivers/gpu/drm/drm_irq.c
+@@ -259,7 +259,8 @@
+  */
+ int drm_irq_uninstall(struct drm_device * dev)
+ {
+-      int irq_enabled;
++      unsigned long irqflags;
++      int irq_enabled, i;
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+               return -EINVAL;
+@@ -269,6 +270,16 @@
+       dev->irq_enabled = 0;
+       mutex_unlock(&dev->struct_mutex);
++      /*
++       * Wake up any waiters so they don't hang.
++       */
++      spin_lock_irqsave(&dev->vbl_lock, irqflags);
++      for (i = 0; i < dev->num_crtcs; i++) {
++              DRM_WAKEUP(&dev->vbl_queue[i]);
++              dev->vblank_enabled[i] = 0;
++      }
++      spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++
+       if (!irq_enabled)
+               return -EINVAL;
+@@ -617,8 +628,9 @@
+               DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
+                         vblwait->request.sequence, crtc);
+               DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+-                          ((drm_vblank_count(dev, crtc)
+-                            - vblwait->request.sequence) <= (1 << 23)));
++                          (((drm_vblank_count(dev, crtc) -
++                             vblwait->request.sequence) <= (1 << 23)) ||
++                           !dev->irq_enabled));
+               if (ret != -EINTR) {
+                       struct timeval now;
+--- kernel-maemo-2.6.28.test.orig/drivers/gpu/drm/i915/i915_gem.c
++++ kernel-maemo-2.6.28.test/drivers/gpu/drm/i915/i915_gem.c
+@@ -1161,6 +1161,8 @@
+       struct drm_mm_node *free_space;
+       int page_count, ret;
++      if (dev_priv->mm.suspended)
++              return -EBUSY;
+       if (alignment == 0)
+               alignment = PAGE_SIZE;
+       if (alignment & (PAGE_SIZE - 1)) {
+@@ -2029,13 +2031,15 @@
+               /* error other than GTT full, or we've already tried again */
+               if (ret != -ENOMEM || pin_tries >= 1) {
+-                      DRM_ERROR("Failed to pin buffers %d\n", ret);
++                      if (ret != -ERESTARTSYS)
++                              DRM_ERROR("Failed to pin buffers %d\n", ret);
+                       goto err;
+               }
+               /* unpin all of our buffers */
+               for (i = 0; i < pinned; i++)
+                       i915_gem_object_unpin(object_list[i]);
++              pinned = 0;
+               /* evict everyone we can from the aperture */
+               ret = i915_gem_evict_everything(dev);
+@@ -2178,7 +2182,8 @@
+       if (obj_priv->gtt_space == NULL) {
+               ret = i915_gem_object_bind_to_gtt(obj, alignment);
+               if (ret != 0) {
+-                      DRM_ERROR("Failure to bind: %d", ret);
++                      if (ret != -EBUSY && ret != -ERESTARTSYS)
++                              DRM_ERROR("Failure to bind: %d", ret);
+                       return ret;
+               }
+       }
+@@ -2700,20 +2705,21 @@
+               dev_priv->mm.wedged = 0;
+       }
+-      ret = i915_gem_init_ringbuffer(dev);
+-      if (ret != 0)
+-              return ret;
+-
+       dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
+                                                       dev->agp->agp_info.aper_size
+                                                       * 1024 * 1024);
+       mutex_lock(&dev->struct_mutex);
++      dev_priv->mm.suspended = 0;
++
++      ret = i915_gem_init_ringbuffer(dev);
++      if (ret != 0)
++              return ret;
++
+       BUG_ON(!list_empty(&dev_priv->mm.active_list));
+       BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+       BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+       BUG_ON(!list_empty(&dev_priv->mm.request_list));
+-      dev_priv->mm.suspended = 0;
+       mutex_unlock(&dev->struct_mutex);
+       drm_irq_install(dev);
+--- kernel-maemo-2.6.28.test.orig/drivers/gpu/drm/i915/i915_irq.c
++++ kernel-maemo-2.6.28.test/drivers/gpu/drm/i915/i915_irq.c
+@@ -400,6 +400,12 @@
+ {
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       unsigned long irqflags;
++      int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++      u32 pipeconf;
++
++      pipeconf = I915_READ(pipeconf_reg);
++      if (!(pipeconf & PIPEACONF_ENABLE))
++              return -EINVAL;
+       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       if (IS_I965G(dev))
+--- kernel-maemo-2.6.28.test.orig/drivers/hid/hid-core.c
++++ kernel-maemo-2.6.28.test/drivers/hid/hid-core.c
+@@ -1302,6 +1302,12 @@
+       { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
++      { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
++      { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
++      { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
++      { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
++      { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
++      { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 0x030c) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
+@@ -1529,10 +1535,6 @@
+       { HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD3) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY1) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY2) },
+-      { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
+-      { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
+-      { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
+-      { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
+@@ -1543,8 +1545,6 @@
+       { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
+-      { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
+-      { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+       { }
+ };
+--- kernel-maemo-2.6.28.test.orig/drivers/hid/hid-microsoft.c
++++ kernel-maemo-2.6.28.test/drivers/hid/hid-microsoft.c
+@@ -30,7 +30,7 @@
+ #define MS_NOGET      0x10
+ /*
+- * Microsoft Wireless Desktop Receiver (Model 1028) has several
++ * Microsoft Wireless Desktop Receiver (Model 1028) has
+  * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
+  */
+ static void ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+@@ -38,17 +38,12 @@
+ {
+       unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+-      if ((quirks & MS_RDESC) && rsize == 571 && rdesc[284] == 0x19 &&
+-                      rdesc[286] == 0x2a && rdesc[304] == 0x19 &&
+-                      rdesc[306] == 0x29 && rdesc[352] == 0x1a &&
+-                      rdesc[355] == 0x2a && rdesc[557] == 0x19 &&
++      if ((quirks & MS_RDESC) && rsize == 571 && rdesc[557] == 0x19 &&
+                       rdesc[559] == 0x29) {
+               dev_info(&hdev->dev, "fixing up Microsoft Wireless Receiver "
+                               "Model 1028 report descriptor\n");
+-              rdesc[284] = rdesc[304] = rdesc[557] = 0x35;
+-              rdesc[352] = 0x36;
+-              rdesc[286] = rdesc[355] = 0x46;
+-              rdesc[306] = rdesc[559] = 0x45;
++              rdesc[557] = 0x35;
++              rdesc[559] = 0x45;
+       }
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/hid/hid-sony.c
++++ kernel-maemo-2.6.28.test/drivers/hid/hid-sony.c
+@@ -102,7 +102,7 @@
+       }
+       ret = sony_set_operational(hdev);
+-      if (ret)
++      if (ret < 0)
+               goto err_stop;
+       return 0;
+--- kernel-maemo-2.6.28.test.orig/drivers/hwmon/abituguru3.c
++++ kernel-maemo-2.6.28.test/drivers/hwmon/abituguru3.c
+@@ -1153,7 +1153,7 @@
+ static inline int abituguru3_dmi_detect(void)
+ {
+-      return -ENODEV;
++      return 1;
+ }
+ #endif /* CONFIG_DMI */
+--- kernel-maemo-2.6.28.test.orig/drivers/hwmon/f71882fg.c
++++ kernel-maemo-2.6.28.test/drivers/hwmon/f71882fg.c
+@@ -837,7 +837,7 @@
+       devid = superio_inw(sioaddr, SIO_REG_MANID);
+       if (devid != SIO_FINTEK_ID) {
+-              printk(KERN_INFO DRVNAME ": Not a Fintek device\n");
++              pr_debug(DRVNAME ": Not a Fintek device\n");
+               goto exit;
+       }
+--- kernel-maemo-2.6.28.test.orig/drivers/hwmon/it87.c
++++ kernel-maemo-2.6.28.test/drivers/hwmon/it87.c
+@@ -207,7 +207,7 @@
+ #define TEMP_TO_REG(val) (SENSORS_LIMIT(((val)<0?(((val)-500)/1000):\
+                                       ((val)+500)/1000),-128,127))
+-#define TEMP_FROM_REG(val) (((val)>0x80?(val)-0x100:(val))*1000)
++#define TEMP_FROM_REG(val) ((val) * 1000)
+ #define PWM_TO_REG(val)   ((val) >> 1)
+ #define PWM_FROM_REG(val) (((val)&0x7f) << 1)
+@@ -261,9 +261,9 @@
+       u8 has_fan;             /* Bitfield, fans enabled */
+       u16 fan[5];             /* Register values, possibly combined */
+       u16 fan_min[5];         /* Register values, possibly combined */
+-      u8 temp[3];             /* Register value */
+-      u8 temp_high[3];        /* Register value */
+-      u8 temp_low[3];         /* Register value */
++      s8 temp[3];             /* Register value */
++      s8 temp_high[3];        /* Register value */
++      s8 temp_low[3];         /* Register value */
+       u8 sensor;              /* Register value */
+       u8 fan_div[3];          /* Register encoding, shifted right */
+       u8 vid;                 /* Register encoding, combined */
+--- kernel-maemo-2.6.28.test.orig/drivers/i2c/busses/i2c-acorn.c
++++ kernel-maemo-2.6.28.test/drivers/i2c/busses/i2c-acorn.c
+@@ -84,6 +84,7 @@
+ static struct i2c_adapter ioc_ops = {
+       .id                     = I2C_HW_B_IOC,
++      .nr                     = 0,
+       .algo_data              = &ioc_data,
+ };
+@@ -91,7 +92,7 @@
+ {
+       force_ones = FORCE_ONES | SCL | SDA;
+-      return i2c_bit_add_bus(&ioc_ops);
++      return i2c_bit_add_numbered_bus(&ioc_ops);
+ }
+ module_init(i2c_ioc_init);
+--- kernel-maemo-2.6.28.test.orig/drivers/i2c/busses/i2c-amd8111.c
++++ kernel-maemo-2.6.28.test/drivers/i2c/busses/i2c-amd8111.c
+@@ -72,7 +72,7 @@
+ {
+       int timeout = 500;
+-      while (timeout-- && (inb(smbus->base + AMD_EC_SC) & AMD_EC_SC_IBF))
++      while ((inb(smbus->base + AMD_EC_SC) & AMD_EC_SC_IBF) && --timeout)
+               udelay(1);
+       if (!timeout) {
+@@ -88,7 +88,7 @@
+ {
+       int timeout = 500;
+-      while (timeout-- && (~inb(smbus->base + AMD_EC_SC) & AMD_EC_SC_OBF))
++      while ((~inb(smbus->base + AMD_EC_SC) & AMD_EC_SC_OBF) && --timeout)
+               udelay(1);
+       if (!timeout) {
+--- kernel-maemo-2.6.28.test.orig/drivers/i2c/busses/i2c-pxa.c
++++ kernel-maemo-2.6.28.test/drivers/i2c/busses/i2c-pxa.c
+@@ -644,7 +644,7 @@
+       i2c_pxa_start_message(i2c);
+-      while (timeout-- && i2c->msg_num > 0) {
++      while (i2c->msg_num > 0 && --timeout) {
+               i2c_pxa_handler(0, i2c);
+               udelay(10);
+       }
+--- kernel-maemo-2.6.28.test.orig/drivers/i2c/i2c-core.c
++++ kernel-maemo-2.6.28.test/drivers/i2c/i2c-core.c
+@@ -1831,7 +1831,8 @@
+       case I2C_SMBUS_QUICK:
+               msg[0].len = 0;
+               /* Special case: The read/write field is used as data */
+-              msg[0].flags = flags | (read_write==I2C_SMBUS_READ)?I2C_M_RD:0;
++              msg[0].flags = flags | (read_write == I2C_SMBUS_READ ?
++                                      I2C_M_RD : 0);
+               num = 1;
+               break;
+       case I2C_SMBUS_BYTE:
+--- kernel-maemo-2.6.28.test.orig/drivers/ide/hpt366.c
++++ kernel-maemo-2.6.28.test/drivers/ide/hpt366.c
+@@ -114,6 +114,8 @@
+  *   the register setting lists into the table indexed by the clock selected
+  * - set the correct hwif->ultra_mask for each individual chip
+  * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards
++ * - stop resetting HPT370's state machine before each DMA transfer as that has
++ *   caused more harm than good
+  *    Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com>
+  */
+@@ -133,7 +135,7 @@
+ #define DRV_NAME "hpt366"
+ /* various tuning parameters */
+-#define HPT_RESET_STATE_ENGINE
++#undef        HPT_RESET_STATE_ENGINE
+ #undef        HPT_DELAY_INTERRUPT
+ #define HPT_SERIALIZE_IO      0
+--- kernel-maemo-2.6.28.test.orig/drivers/ide/ide-cd.c
++++ kernel-maemo-2.6.28.test/drivers/ide/ide-cd.c
+@@ -984,6 +984,9 @@
+               if (blk_fs_request(rq)) {
+                       ide_end_request(drive, 1, rq->nr_sectors);
+                       return ide_stopped;
++              } else if (rq->cmd_type == REQ_TYPE_ATA_PC && !rq->bio) {
++                      ide_end_request(drive, 1, 1);
++                      return ide_stopped;
+               }
+               goto end_request;
+       }
+--- kernel-maemo-2.6.28.test.orig/drivers/ide/ide-io.c
++++ kernel-maemo-2.6.28.test/drivers/ide/ide-io.c
+@@ -577,11 +577,14 @@
+       if (hwif->sg_mapped)    /* needed by ide-scsi */
+               return;
+-      if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
+-              hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
+-      } else {
++      if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+               sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
+               hwif->sg_nents = 1;
++      } else if (!rq->bio) {
++              sg_init_one(sg, rq->data, rq->data_len);
++              hwif->sg_nents = 1;
++      } else {
++              hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
+       }
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/ide/ide-iops.c
++++ kernel-maemo-2.6.28.test/drivers/ide/ide-iops.c
+@@ -324,6 +324,8 @@
+       u8 io_32bit = drive->io_32bit;
+       u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
++      len++;
++
+       if (io_32bit) {
+               unsigned long uninitialized_var(flags);
+--- kernel-maemo-2.6.28.test.orig/drivers/ide/it821x.c
++++ kernel-maemo-2.6.28.test/drivers/ide/it821x.c
+@@ -68,6 +68,8 @@
+ #define DRV_NAME "it821x"
++#define QUIRK_VORTEX86 1
++
+ struct it821x_dev
+ {
+       unsigned int smart:1,           /* Are we in smart raid mode */
+@@ -79,6 +81,7 @@
+       u16     pio[2];                 /* Cached PIO values */
+       u16     mwdma[2];               /* Cached MWDMA values */
+       u16     udma[2];                /* Cached UDMA values (per drive) */
++      u16     quirks;
+ };
+ #define ATA_66                0
+@@ -580,6 +583,12 @@
+       hwif->ultra_mask = ATA_UDMA6;
+       hwif->mwdma_mask = ATA_MWDMA2;
++
++      /* Vortex86SX quirk: prevent Ultra-DMA mode to fix BadCRC issue */
++      if (idev->quirks & QUIRK_VORTEX86) {
++              if (dev->revision == 0x11)
++                      hwif->ultra_mask = 0;
++      }
+ }
+ static void it8212_disable_raid(struct pci_dev *dev)
+@@ -652,6 +661,8 @@
+               return -ENOMEM;
+       }
++      itdevs->quirks = id->driver_data;
++
+       rc = ide_pci_init_one(dev, &it821x_chipset, itdevs);
+       if (rc)
+               kfree(itdevs);
+@@ -671,6 +682,7 @@
+ static const struct pci_device_id it821x_pci_tbl[] = {
+       { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), 0 },
+       { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), 0 },
++      { PCI_VDEVICE(RDC, PCI_DEVICE_ID_RDC_D1010), QUIRK_VORTEX86 },
+       { 0, },
+ };
+--- kernel-maemo-2.6.28.test.orig/drivers/ide/tx4938ide.c
++++ kernel-maemo-2.6.28.test/drivers/ide/tx4938ide.c
+@@ -181,7 +181,7 @@
+       while (count--)
+               *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port));
+-      __ide_flush_dcache_range((unsigned long)buf, count * 2);
++      __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
+ }
+ static void tx4938ide_output_data_swap(ide_drive_t *drive, struct request *rq,
+@@ -195,7 +195,7 @@
+               __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port);
+               ptr++;
+       }
+-      __ide_flush_dcache_range((unsigned long)buf, count * 2);
++      __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
+ }
+ static const struct ide_tp_ops tx4938ide_tp_ops = {
+--- kernel-maemo-2.6.28.test.orig/drivers/ide/tx4939ide.c
++++ kernel-maemo-2.6.28.test/drivers/ide/tx4939ide.c
+@@ -259,6 +259,12 @@
+                       bcount = 0x10000 - (cur_addr & 0xffff);
+                       if (bcount > cur_len)
+                               bcount = cur_len;
++                      /*
++                       * This workaround for zero count seems required.
++                       * (standard ide_build_dmatable do it too)
++                       */
++                      if ((bcount & 0xffff) == 0x0000)
++                              bcount = 0x8000;
+                       *table++ = bcount & 0xffff;
+                       *table++ = cur_addr;
+                       cur_addr += bcount;
+@@ -558,7 +564,7 @@
+       while (count--)
+               *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port));
+-      __ide_flush_dcache_range((unsigned long)buf, count * 2);
++      __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
+ }
+ static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq,
+@@ -572,7 +578,7 @@
+               __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port);
+               ptr++;
+       }
+-      __ide_flush_dcache_range((unsigned long)buf, count * 2);
++      __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
+ }
+ static const struct ide_tp_ops tx4939ide_tp_ops = {
+--- kernel-maemo-2.6.28.test.orig/drivers/ieee1394/ohci1394.h
++++ kernel-maemo-2.6.28.test/drivers/ieee1394/ohci1394.h
+@@ -26,7 +26,7 @@
+ #define OHCI1394_DRIVER_NAME      "ohci1394"
+-#define OHCI1394_MAX_AT_REQ_RETRIES   0x2
++#define OHCI1394_MAX_AT_REQ_RETRIES   0xf
+ #define OHCI1394_MAX_AT_RESP_RETRIES  0x2
+ #define OHCI1394_MAX_PHYS_RESP_RETRIES        0x8
+ #define OHCI1394_MAX_SELF_ID_ERRORS   16
+--- kernel-maemo-2.6.28.test.orig/drivers/ieee1394/sbp2.c
++++ kernel-maemo-2.6.28.test/drivers/ieee1394/sbp2.c
+@@ -395,6 +395,16 @@
+               .model_id               = SBP2_ROM_VALUE_WILDCARD,
+               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS,
+       },
++      /*
++       * iPod 2nd generation: needs 128k max transfer size workaround
++       * iPod 3rd generation: needs fix capacity workaround
++       */
++      {
++              .firmware_revision      = 0x0a2700,
++              .model_id               = 0x000000,
++              .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS |
++                                        SBP2_WORKAROUND_FIX_CAPACITY,
++      },
+       /* iPod 4th generation */ {
+               .firmware_revision      = 0x0a2700,
+               .model_id               = 0x000021,
+--- kernel-maemo-2.6.28.test.orig/drivers/infiniband/hw/nes/nes_cm.c
++++ kernel-maemo-2.6.28.test/drivers/infiniband/hw/nes/nes_cm.c
+@@ -2495,12 +2495,14 @@
+       int ret = 0;
+       struct nes_vnic *nesvnic;
+       struct nes_device *nesdev;
++      struct nes_ib_device *nesibdev;
+       nesvnic = to_nesvnic(nesqp->ibqp.device);
+       if (!nesvnic)
+               return -EINVAL;
+       nesdev = nesvnic->nesdev;
++      nesibdev = nesvnic->nesibdev;
+       nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
+                       atomic_read(&nesvnic->netdev->refcnt));
+@@ -2512,6 +2514,8 @@
+       } else {
+               /* Need to free the Last Streaming Mode Message */
+               if (nesqp->ietf_frame) {
++                      if (nesqp->lsmm_mr)
++                              nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr);
+                       pci_free_consistent(nesdev->pcidev,
+                                       nesqp->private_data_len+sizeof(struct ietf_mpa_frame),
+                                       nesqp->ietf_frame, nesqp->ietf_frame_pbase);
+@@ -2545,6 +2549,10 @@
+       struct iw_cm_event cm_event;
+       struct nes_hw_qp_wqe *wqe;
+       struct nes_v4_quad nes_quad;
++      struct nes_ib_device *nesibdev;
++      struct ib_mr *ibmr = NULL;
++      struct ib_phys_buf ibphysbuf;
++      struct nes_pd *nespd;
+       u32 crc_value;
+       int ret;
+@@ -2605,6 +2613,26 @@
+       if (cm_id->remote_addr.sin_addr.s_addr !=
+                       cm_id->local_addr.sin_addr.s_addr) {
+               u64temp = (unsigned long)nesqp;
++              nesibdev = nesvnic->nesibdev;
++              nespd = nesqp->nespd;
++              ibphysbuf.addr = nesqp->ietf_frame_pbase;
++              ibphysbuf.size = conn_param->private_data_len +
++                                      sizeof(struct ietf_mpa_frame);
++              ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd,
++                                              &ibphysbuf, 1,
++                                              IB_ACCESS_LOCAL_WRITE,
++                                              (u64 *)&nesqp->ietf_frame);
++              if (!ibmr) {
++                      nes_debug(NES_DBG_CM, "Unable to register memory region"
++                                      "for lSMM for cm_node = %p \n",
++                                      cm_node);
++                      return -ENOMEM;
++              }
++
++              ibmr->pd = &nespd->ibpd;
++              ibmr->device = nespd->ibpd.device;
++              nesqp->lsmm_mr = ibmr;
++
+               u64temp |= NES_SW_CONTEXT_ALIGN>>1;
+               set_wqe_64bit_value(wqe->wqe_words,
+                       NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
+@@ -2615,14 +2643,13 @@
+               wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
+                       cpu_to_le32(conn_param->private_data_len +
+                       sizeof(struct ietf_mpa_frame));
+-              wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] =
+-                      cpu_to_le32((u32)nesqp->ietf_frame_pbase);
+-              wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] =
+-                      cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32));
++              set_wqe_64bit_value(wqe->wqe_words,
++                                      NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
++                                      (u64)nesqp->ietf_frame);
+               wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
+                       cpu_to_le32(conn_param->private_data_len +
+                       sizeof(struct ietf_mpa_frame));
+-              wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
++              wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey;
+               nesqp->nesqp_context->ird_ord_sizes |=
+                       cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
+--- kernel-maemo-2.6.28.test.orig/drivers/infiniband/hw/nes/nes_verbs.c
++++ kernel-maemo-2.6.28.test/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -1360,8 +1360,10 @@
+                                       NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT);
+                       nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.sq_encoded_size <<
+                                       NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT);
++                      if (!udata) {
+                               nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_PRIV_EN);
+                               nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_FAST_REGISTER_EN);
++                      }
+                       nesqp->nesqp_context->cqs = cpu_to_le32(nesqp->nesscq->hw_cq.cq_number +
+                                       ((u32)nesqp->nesrcq->hw_cq.cq_number << 16));
+                       u64temp = (u64)nesqp->hwqp.sq_pbase;
+--- kernel-maemo-2.6.28.test.orig/drivers/infiniband/hw/nes/nes_verbs.h
++++ kernel-maemo-2.6.28.test/drivers/infiniband/hw/nes/nes_verbs.h
+@@ -134,6 +134,7 @@
+       struct ietf_mpa_frame *ietf_frame;
+       dma_addr_t            ietf_frame_pbase;
+       wait_queue_head_t     state_waitq;
++      struct ib_mr          *lsmm_mr;
+       unsigned long         socket;
+       struct nes_hw_qp      hwqp;
+       struct work_struct    work;
+--- kernel-maemo-2.6.28.test.orig/drivers/input/gameport/gameport.c
++++ kernel-maemo-2.6.28.test/drivers/input/gameport/gameport.c
+@@ -50,9 +50,8 @@
+ static struct bus_type gameport_bus;
+-static void gameport_add_driver(struct gameport_driver *drv);
+ static void gameport_add_port(struct gameport *gameport);
+-static void gameport_destroy_port(struct gameport *gameport);
++static void gameport_attach_driver(struct gameport_driver *drv);
+ static void gameport_reconnect_port(struct gameport *gameport);
+ static void gameport_disconnect_port(struct gameport *gameport);
+@@ -230,7 +229,6 @@
+ enum gameport_event_type {
+       GAMEPORT_REGISTER_PORT,
+-      GAMEPORT_REGISTER_DRIVER,
+       GAMEPORT_ATTACH_DRIVER,
+ };
+@@ -374,8 +372,8 @@
+                               gameport_add_port(event->object);
+                               break;
+-                      case GAMEPORT_REGISTER_DRIVER:
+-                              gameport_add_driver(event->object);
++                      case GAMEPORT_ATTACH_DRIVER:
++                              gameport_attach_driver(event->object);
+                               break;
+                       default:
+@@ -707,14 +705,14 @@
+       return 0;
+ }
+-static void gameport_add_driver(struct gameport_driver *drv)
++static void gameport_attach_driver(struct gameport_driver *drv)
+ {
+       int error;
+-      error = driver_register(&drv->driver);
++      error = driver_attach(&drv->driver);
+       if (error)
+               printk(KERN_ERR
+-                      "gameport: driver_register() failed for %s, error: %d\n",
++                      "gameport: driver_attach() failed for %s, error: %d\n",
+                       drv->driver.name, error);
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/input/keyboard/atkbd.c
++++ kernel-maemo-2.6.28.test/drivers/input/keyboard/atkbd.c
+@@ -884,6 +884,22 @@
+ }
+ /*
++ * Samsung NC10 with Fn+F? key release not working
++ */
++static void atkbd_samsung_keymap_fixup(struct atkbd *atkbd)
++{
++      const unsigned int forced_release_keys[] = {
++              0x82, 0x83, 0x84, 0x86, 0x88, 0x89, 0xb3, 0xf7, 0xf9,
++      };
++      int i;
++
++      if (atkbd->set == 2)
++              for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++)
++                      __set_bit(forced_release_keys[i],
++                                atkbd->force_release_mask);
++}
++
++/*
+  * atkbd_set_keycode_table() initializes keyboard's keycode table
+  * according to the selected scancode set
+  */
+@@ -1493,6 +1509,15 @@
+               .callback = atkbd_setup_fixup,
+               .driver_data = atkbd_inventec_keymap_fixup,
+       },
++      {
++              .ident = "Samsung NC10",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
++              },
++              .callback = atkbd_setup_fixup,
++              .driver_data = atkbd_samsung_keymap_fixup,
++      },
+       { }
+ };
+--- kernel-maemo-2.6.28.test.orig/drivers/isdn/gigaset/bas-gigaset.c
++++ kernel-maemo-2.6.28.test/drivers/isdn/gigaset/bas-gigaset.c
+@@ -46,6 +46,9 @@
+ /* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
+ #define IF_WRITEBUF 264
++/* interrupt pipe message size according to ibid. ch. 2.2 */
++#define IP_MSGSIZE 3
++
+ /* Values for the Gigaset 307x */
+ #define USB_GIGA_VENDOR_ID      0x0681
+ #define USB_3070_PRODUCT_ID     0x0001
+@@ -110,7 +113,7 @@
+       unsigned char           *rcvbuf;        /* AT reply receive buffer */
+       struct urb              *urb_int_in;    /* URB for interrupt pipe */
+-      unsigned char           int_in_buf[3];
++      unsigned char           *int_in_buf;
+       spinlock_t              lock;           /* locks all following */
+       int                     basstate;       /* bitmap (BS_*) */
+@@ -657,7 +660,7 @@
+       }
+       /* drop incomplete packets even if the missing bytes wouldn't matter */
+-      if (unlikely(urb->actual_length < 3)) {
++      if (unlikely(urb->actual_length < IP_MSGSIZE)) {
+               dev_warn(cs->dev, "incomplete interrupt packet (%d bytes)\n",
+                        urb->actual_length);
+               goto resubmit;
+@@ -2127,6 +2130,7 @@
+ static void gigaset_freecshw(struct cardstate *cs)
+ {
+       /* timers, URBs and rcvbuf are disposed of in disconnect */
++      kfree(cs->hw.bas->int_in_buf);
+       kfree(cs->hw.bas);
+       cs->hw.bas = NULL;
+ }
+@@ -2232,6 +2236,12 @@
+               }
+               hostif = interface->cur_altsetting;
+       }
++      ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL);
++      if (!ucs->int_in_buf) {
++              kfree(ucs);
++              pr_err("out of memory\n");
++              return 0;
++      }
+       /* Reject application specific interfaces
+        */
+@@ -2290,7 +2300,7 @@
+       usb_fill_int_urb(ucs->urb_int_in, udev,
+                        usb_rcvintpipe(udev,
+                                       (endpoint->bEndpointAddress) & 0x0f),
+-                       ucs->int_in_buf, 3, read_int_callback, cs,
++                       ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
+                        endpoint->bInterval);
+       if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) {
+               dev_err(cs->dev, "could not submit interrupt URB: %s\n",
+--- kernel-maemo-2.6.28.test.orig/drivers/md/bitmap.c
++++ kernel-maemo-2.6.28.test/drivers/md/bitmap.c
+@@ -964,9 +964,11 @@
+                                */
+                               page = bitmap->sb_page;
+                               offset = sizeof(bitmap_super_t);
+-                              read_sb_page(bitmap->mddev, bitmap->offset,
+-                                           page,
+-                                           index, count);
++                              if (!file)
++                                      read_sb_page(bitmap->mddev,
++                                                   bitmap->offset,
++                                                   page,
++                                                   index, count);
+                       } else if (file) {
+                               page = read_page(file, index, bitmap, count);
+                               offset = 0;
+--- kernel-maemo-2.6.28.test.orig/drivers/md/dm-crypt.c
++++ kernel-maemo-2.6.28.test/drivers/md/dm-crypt.c
+@@ -60,6 +60,7 @@
+ };
+ struct dm_crypt_request {
++      struct convert_context *ctx;
+       struct scatterlist sg_in;
+       struct scatterlist sg_out;
+ };
+@@ -335,6 +336,18 @@
+       init_completion(&ctx->restart);
+ }
++static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
++                                           struct ablkcipher_request *req)
++{
++      return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
++}
++
++static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
++                                             struct dm_crypt_request *dmreq)
++{
++      return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
++}
++
+ static int crypt_convert_block(struct crypt_config *cc,
+                              struct convert_context *ctx,
+                              struct ablkcipher_request *req)
+@@ -345,10 +358,11 @@
+       u8 *iv;
+       int r = 0;
+-      dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
++      dmreq = dmreq_of_req(cc, req);
+       iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
+                        crypto_ablkcipher_alignmask(cc->tfm) + 1);
++      dmreq->ctx = ctx;
+       sg_init_table(&dmreq->sg_in, 1);
+       sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
+                   bv_in->bv_offset + ctx->offset_in);
+@@ -395,8 +409,9 @@
+               cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+       ablkcipher_request_set_tfm(cc->req, cc->tfm);
+       ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+-                                           CRYPTO_TFM_REQ_MAY_SLEEP,
+-                                           kcryptd_async_done, ctx);
++                                      CRYPTO_TFM_REQ_MAY_SLEEP,
++                                      kcryptd_async_done,
++                                      dmreq_of_req(cc, cc->req));
+ }
+ /*
+@@ -553,19 +568,22 @@
+ static void crypt_dec_pending(struct dm_crypt_io *io)
+ {
+       struct crypt_config *cc = io->target->private;
++      struct bio *base_bio = io->base_bio;
++      struct dm_crypt_io *base_io = io->base_io;
++      int error = io->error;
+       if (!atomic_dec_and_test(&io->pending))
+               return;
+-      if (likely(!io->base_io))
+-              bio_endio(io->base_bio, io->error);
++      mempool_free(io, cc->io_pool);
++
++      if (likely(!base_io))
++              bio_endio(base_bio, error);
+       else {
+-              if (io->error && !io->base_io->error)
+-                      io->base_io->error = io->error;
+-              crypt_dec_pending(io->base_io);
++              if (error && !base_io->error)
++                      base_io->error = error;
++              crypt_dec_pending(base_io);
+       }
+-
+-      mempool_free(io, cc->io_pool);
+ }
+ /*
+@@ -821,7 +839,8 @@
+ static void kcryptd_async_done(struct crypto_async_request *async_req,
+                              int error)
+ {
+-      struct convert_context *ctx = async_req->data;
++      struct dm_crypt_request *dmreq = async_req->data;
++      struct convert_context *ctx = dmreq->ctx;
+       struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
+       struct crypt_config *cc = io->target->private;
+@@ -830,7 +849,7 @@
+               return;
+       }
+-      mempool_free(ablkcipher_request_cast(async_req), cc->req_pool);
++      mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
+       if (!atomic_dec_and_test(&ctx->pending))
+               return;
+--- kernel-maemo-2.6.28.test.orig/drivers/md/dm-io.c
++++ kernel-maemo-2.6.28.test/drivers/md/dm-io.c
+@@ -292,6 +292,8 @@
+                                            (PAGE_SIZE >> SECTOR_SHIFT));
+               num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
+                                     num_bvecs);
++              if (unlikely(num_bvecs > BIO_MAX_PAGES))
++                      num_bvecs = BIO_MAX_PAGES;
+               bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
+               bio->bi_sector = where->sector + (where->count - remaining);
+               bio->bi_bdev = where->bdev;
+--- kernel-maemo-2.6.28.test.orig/drivers/md/dm-ioctl.c
++++ kernel-maemo-2.6.28.test/drivers/md/dm-ioctl.c
+@@ -704,7 +704,8 @@
+       char *new_name = (char *) param + param->data_start;
+       if (new_name < param->data ||
+-          invalid_str(new_name, (void *) param + param_size)) {
++          invalid_str(new_name, (void *) param + param_size) ||
++          strlen(new_name) > DM_NAME_LEN - 1) {
+               DMWARN("Invalid new logical volume name supplied.");
+               return -EINVAL;
+       }
+--- kernel-maemo-2.6.28.test.orig/drivers/md/dm-log.c
++++ kernel-maemo-2.6.28.test/drivers/md/dm-log.c
+@@ -467,6 +467,7 @@
+               lc->disk_header = vmalloc(buf_size);
+               if (!lc->disk_header) {
+                       DMWARN("couldn't allocate disk log buffer");
++                      dm_io_client_destroy(lc->io_req.client);
+                       kfree(lc);
+                       return -ENOMEM;
+               }
+@@ -482,6 +483,8 @@
+               DMWARN("couldn't allocate sync bitset");
+               if (!dev)
+                       vfree(lc->clean_bits);
++              else
++                      dm_io_client_destroy(lc->io_req.client);
+               vfree(lc->disk_header);
+               kfree(lc);
+               return -ENOMEM;
+@@ -495,6 +498,8 @@
+               vfree(lc->sync_bits);
+               if (!dev)
+                       vfree(lc->clean_bits);
++              else
++                      dm_io_client_destroy(lc->io_req.client);
+               vfree(lc->disk_header);
+               kfree(lc);
+               return -ENOMEM;
+--- kernel-maemo-2.6.28.test.orig/drivers/md/dm-raid1.c
++++ kernel-maemo-2.6.28.test/drivers/md/dm-raid1.c
+@@ -197,9 +197,6 @@
+       struct mirror_set *ms = m->ms;
+       struct mirror *new;
+-      if (!errors_handled(ms))
+-              return;
+-
+       /*
+        * error_count is used for nothing more than a
+        * simple way to tell if a device has encountered
+@@ -210,6 +207,9 @@
+       if (test_and_set_bit(error_type, &m->error_type))
+               return;
++      if (!errors_handled(ms))
++              return;
++
+       if (m != get_default_mirror(ms))
+               goto out;
+--- kernel-maemo-2.6.28.test.orig/drivers/md/dm.c
++++ kernel-maemo-2.6.28.test/drivers/md/dm.c
+@@ -480,9 +480,12 @@
+ static void dec_pending(struct dm_io *io, int error)
+ {
+       unsigned long flags;
++      int io_error;
++      struct bio *bio;
++      struct mapped_device *md = io->md;
+       /* Push-back supersedes any I/O errors */
+-      if (error && !(io->error > 0 && __noflush_suspending(io->md)))
++      if (error && !(io->error > 0 && __noflush_suspending(md)))
+               io->error = error;
+       if (atomic_dec_and_test(&io->io_count)) {
+@@ -492,25 +495,28 @@
+                        * This must be handled before the sleeper on
+                        * suspend queue merges the pushback list.
+                        */
+-                      spin_lock_irqsave(&io->md->pushback_lock, flags);
+-                      if (__noflush_suspending(io->md))
+-                              bio_list_add(&io->md->pushback, io->bio);
++                      spin_lock_irqsave(&md->pushback_lock, flags);
++                      if (__noflush_suspending(md))
++                              bio_list_add(&md->pushback, io->bio);
+                       else
+                               /* noflush suspend was interrupted. */
+                               io->error = -EIO;
+-                      spin_unlock_irqrestore(&io->md->pushback_lock, flags);
++                      spin_unlock_irqrestore(&md->pushback_lock, flags);
+               }
+               end_io_acct(io);
+-              if (io->error != DM_ENDIO_REQUEUE) {
+-                      blk_add_trace_bio(io->md->queue, io->bio,
++              io_error = io->error;
++              bio = io->bio;
++
++              free_io(md, io);
++
++              if (io_error != DM_ENDIO_REQUEUE) {
++                      blk_add_trace_bio(md->queue, io->bio,
+                                         BLK_TA_COMPLETE);
+-                      bio_endio(io->bio, io->error);
++                      bio_endio(bio, io_error);
+               }
+-
+-              free_io(io->md, io);
+       }
+ }
+@@ -518,6 +524,7 @@
+ {
+       int r = 0;
+       struct dm_target_io *tio = bio->bi_private;
++      struct dm_io *io = tio->io;
+       struct mapped_device *md = tio->io->md;
+       dm_endio_fn endio = tio->ti->type->end_io;
+@@ -541,15 +548,14 @@
+               }
+       }
+-      dec_pending(tio->io, error);
+-
+       /*
+        * Store md for cleanup instead of tio which is about to get freed.
+        */
+       bio->bi_private = md->bs;
+-      bio_put(bio);
+       free_tio(md, tio);
++      bio_put(bio);
++      dec_pending(io, error);
+ }
+ static sector_t max_io_len(struct mapped_device *md,
+--- kernel-maemo-2.6.28.test.orig/drivers/md/linear.c
++++ kernel-maemo-2.6.28.test/drivers/md/linear.c
+@@ -25,13 +25,13 @@
+ {
+       dev_info_t *hash;
+       linear_conf_t *conf = mddev_to_conf(mddev);
++      sector_t idx = sector >> conf->sector_shift;
+       /*
+        * sector_div(a,b) returns the remainer and sets a to a/b
+        */
+-      sector >>= conf->sector_shift;
+-      (void)sector_div(sector, conf->spacing);
+-      hash = conf->hash_table[sector];
++      (void)sector_div(idx, conf->spacing);
++      hash = conf->hash_table[idx];
+       while (sector >= hash->num_sectors + hash->start_sector)
+               hash++;
+--- kernel-maemo-2.6.28.test.orig/drivers/md/md.c
++++ kernel-maemo-2.6.28.test/drivers/md/md.c
+@@ -1447,6 +1447,11 @@
+               if (find_rdev_nr(mddev, rdev->desc_nr))
+                       return -EBUSY;
+       }
++      if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
++              printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
++                     mdname(mddev), mddev->max_disks);
++              return -EBUSY;
++      }
+       bdevname(rdev->bdev,b);
+       while ( (s=strchr(b, '/')) != NULL)
+               *s = '!';
+@@ -2355,6 +2360,15 @@
+       i = 0;
+       rdev_for_each(rdev, tmp, mddev) {
++              if (rdev->desc_nr >= mddev->max_disks ||
++                  i > mddev->max_disks) {
++                      printk(KERN_WARNING
++                             "md: %s: %s: only %d devices permitted\n",
++                             mdname(mddev), bdevname(rdev->bdev, b),
++                             mddev->max_disks);
++                      kick_rdev_from_array(rdev);
++                      continue;
++              }
+               if (rdev != freshest)
+                       if (super_types[mddev->major_version].
+                           validate_super(mddev, rdev)) {
+@@ -3680,6 +3694,10 @@
+               return err;
+       }
+       if (mddev->pers->sync_request) {
++              /* wait for any previously scheduled redundancy groups
++               * to be removed
++               */
++              flush_scheduled_work();
+               if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
+                       printk(KERN_WARNING
+                              "md: cannot register extra attributes for %s\n",
+@@ -3810,6 +3828,14 @@
+       spin_unlock(&inode->i_lock);
+ }
++
++static void sysfs_delayed_rm(struct work_struct *ws)
++{
++      mddev_t *mddev = container_of(ws, mddev_t, del_work);
++
++      sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
++}
++
+ /* mode:
+  *   0 - completely stop and dis-assemble array
+  *   1 - switch to readonly
+@@ -3819,6 +3845,7 @@
+ {
+       int err = 0;
+       struct gendisk *disk = mddev->gendisk;
++      int remove_group = 0;
+       if (atomic_read(&mddev->openers) > is_open) {
+               printk("md: %s still in use.\n",mdname(mddev));
+@@ -3854,10 +3881,9 @@
+                       mddev->queue->merge_bvec_fn = NULL;
+                       mddev->queue->unplug_fn = NULL;
+                       mddev->queue->backing_dev_info.congested_fn = NULL;
+-                      if (mddev->pers->sync_request)
+-                              sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+-
+                       module_put(mddev->pers->owner);
++                      if (mddev->pers->sync_request)
++                              remove_group = 1;
+                       mddev->pers = NULL;
+                       /* tell userspace to handle 'inactive' */
+                       sysfs_notify_dirent(mddev->sysfs_state);
+@@ -3905,6 +3931,15 @@
+               /* make sure all md_delayed_delete calls have finished */
+               flush_scheduled_work();
++              /* we can't wait for group removal under mddev_lock as
++               * threads holding the group 'active' need to acquire
++               * mddev_lock before going inactive
++               */
++              if (remove_group) {
++                      INIT_WORK(&mddev->del_work, sysfs_delayed_rm);
++                      schedule_work(&mddev->del_work);
++              }
++
+               export_array(mddev);
+               mddev->array_sectors = 0;
+@@ -4448,13 +4483,6 @@
+        * noticed in interrupt contexts ...
+        */
+-      if (rdev->desc_nr == mddev->max_disks) {
+-              printk(KERN_WARNING "%s: can not hot-add to full array!\n",
+-                      mdname(mddev));
+-              err = -EBUSY;
+-              goto abort_unbind_export;
+-      }
+-
+       rdev->raid_disk = -1;
+       md_update_sb(mddev, 1);
+@@ -4468,9 +4496,6 @@
+       md_new_event(mddev);
+       return 0;
+-abort_unbind_export:
+-      unbind_rdev_from_array(rdev);
+-
+ abort_export:
+       export_rdev(rdev);
+       return err;
+--- kernel-maemo-2.6.28.test.orig/drivers/md/raid1.c
++++ kernel-maemo-2.6.28.test/drivers/md/raid1.c
+@@ -1233,8 +1233,9 @@
+       update_head_pos(mirror, r1_bio);
+       if (atomic_dec_and_test(&r1_bio->remaining)) {
+-              md_done_sync(mddev, r1_bio->sectors, uptodate);
++              sector_t s = r1_bio->sectors;
+               put_buf(r1_bio);
++              md_done_sync(mddev, s, uptodate);
+       }
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/md/raid10.c
++++ kernel-maemo-2.6.28.test/drivers/md/raid10.c
+@@ -1236,6 +1236,7 @@
+       /* for reconstruct, we always reschedule after a read.
+        * for resync, only after all reads
+        */
++      rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
+       if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
+           atomic_dec_and_test(&r10_bio->remaining)) {
+               /* we have read all the blocks,
+@@ -1243,7 +1244,6 @@
+                */
+               reschedule_retry(r10_bio);
+       }
+-      rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
+ }
+ static void end_sync_write(struct bio *bio, int error)
+@@ -1264,11 +1264,13 @@
+       update_head_pos(i, r10_bio);
++      rdev_dec_pending(conf->mirrors[d].rdev, mddev);
+       while (atomic_dec_and_test(&r10_bio->remaining)) {
+               if (r10_bio->master_bio == NULL) {
+                       /* the primary of several recovery bios */
+-                      md_done_sync(mddev, r10_bio->sectors, 1);
++                      sector_t s = r10_bio->sectors;
+                       put_buf(r10_bio);
++                      md_done_sync(mddev, s, 1);
+                       break;
+               } else {
+                       r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
+@@ -1276,7 +1278,6 @@
+                       r10_bio = r10_bio2;
+               }
+       }
+-      rdev_dec_pending(conf->mirrors[d].rdev, mddev);
+ }
+ /*
+@@ -1749,8 +1750,6 @@
+       if (!go_faster && conf->nr_waiting)
+               msleep_interruptible(1000);
+-      bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+-
+       /* Again, very different code for resync and recovery.
+        * Both must result in an r10bio with a list of bios that
+        * have bi_end_io, bi_sector, bi_bdev set,
+@@ -1886,6 +1885,8 @@
+               /* resync. Schedule a read for every block at this virt offset */
+               int count = 0;
++              bitmap_cond_end_sync(mddev->bitmap, sector_nr);
++
+               if (!bitmap_start_sync(mddev->bitmap, sector_nr,
+                                      &sync_blocks, mddev->degraded) &&
+                   !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+@@ -2010,13 +2011,13 @@
+       /* There is nowhere to write, so all non-sync
+        * drives must be failed, so try the next chunk...
+        */
+-      {
+-      sector_t sec = max_sector - sector_nr;
+-      sectors_skipped += sec;
++      if (sector_nr + max_sync < max_sector)
++              max_sector = sector_nr + max_sync;
++
++      sectors_skipped += (max_sector - sector_nr);
+       chunks_skipped ++;
+       sector_nr = max_sector;
+       goto skipped;
+-      }
+ }
+ static int run(mddev_t *mddev)
+--- kernel-maemo-2.6.28.test.orig/drivers/media/common/tuners/tda8290.c
++++ kernel-maemo-2.6.28.test/drivers/media/common/tuners/tda8290.c
+@@ -724,7 +724,8 @@
+       fe->ops.analog_ops.info.name = name;
+       if (priv->ver & TDA8290) {
+-              tda8290_init_tuner(fe);
++              if (priv->ver & (TDA8275 | TDA8275A))
++                      tda8290_init_tuner(fe);
+               tda8290_init_if(fe);
+       } else if (priv->ver & TDA8295)
+               tda8295_init_if(fe);
+--- kernel-maemo-2.6.28.test.orig/drivers/media/dvb/frontends/s5h1409.c
++++ kernel-maemo-2.6.28.test/drivers/media/dvb/frontends/s5h1409.c
+@@ -545,9 +545,6 @@
+       s5h1409_enable_modulation(fe, p->u.vsb.modulation);
+-      /* Allow the demod to settle */
+-      msleep(100);
+-
+       if (fe->ops.tuner_ops.set_params) {
+               if (fe->ops.i2c_gate_ctrl)
+                       fe->ops.i2c_gate_ctrl(fe, 1);
+@@ -562,6 +559,10 @@
+               s5h1409_set_qam_interleave_mode(fe);
+       }
++      /* Issue a reset to the demod so it knows to resync against the
++         newly tuned frequency */
++      s5h1409_softreset(fe);
++
+       return 0;
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/media/video/cx23885/cx23885-417.c
++++ kernel-maemo-2.6.28.test/drivers/media/video/cx23885/cx23885-417.c
+@@ -1585,7 +1585,8 @@
+       lock_kernel();
+       list_for_each(list, &cx23885_devlist) {
+               h = list_entry(list, struct cx23885_dev, devlist);
+-              if (h->v4l_device->minor == minor) {
++              if (h->v4l_device &&
++                  h->v4l_device->minor == minor) {
+                       dev = h;
+                       break;
+               }
+--- kernel-maemo-2.6.28.test.orig/drivers/media/video/cx23885/cx23885-video.c
++++ kernel-maemo-2.6.28.test/drivers/media/video/cx23885/cx23885-video.c
+@@ -730,12 +730,13 @@
+       lock_kernel();
+       list_for_each(list, &cx23885_devlist) {
+               h = list_entry(list, struct cx23885_dev, devlist);
+-              if (h->video_dev->minor == minor) {
++              if (h->video_dev &&
++                  h->video_dev->minor == minor) {
+                       dev  = h;
+                       type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+               }
+               if (h->vbi_dev &&
+-                 h->vbi_dev->minor == minor) {
++                  h->vbi_dev->minor == minor) {
+                       dev  = h;
+                       type = V4L2_BUF_TYPE_VBI_CAPTURE;
+               }
+--- kernel-maemo-2.6.28.test.orig/drivers/media/video/ivtv/ivtv-ioctl.c
++++ kernel-maemo-2.6.28.test/drivers/media/video/ivtv/ivtv-ioctl.c
+@@ -1750,6 +1750,18 @@
+               break;
+       }
++      case IVTV_IOC_DMA_FRAME:
++      case VIDEO_GET_PTS:
++      case VIDEO_GET_FRAME_COUNT:
++      case VIDEO_GET_EVENT:
++      case VIDEO_PLAY:
++      case VIDEO_STOP:
++      case VIDEO_FREEZE:
++      case VIDEO_CONTINUE:
++      case VIDEO_COMMAND:
++      case VIDEO_TRY_COMMAND:
++              return ivtv_decoder_ioctls(file, cmd, (void *)arg);
++
+       default:
+               return -EINVAL;
+       }
+@@ -1792,18 +1804,6 @@
+               ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
+               return 0;
+-      case IVTV_IOC_DMA_FRAME:
+-      case VIDEO_GET_PTS:
+-      case VIDEO_GET_FRAME_COUNT:
+-      case VIDEO_GET_EVENT:
+-      case VIDEO_PLAY:
+-      case VIDEO_STOP:
+-      case VIDEO_FREEZE:
+-      case VIDEO_CONTINUE:
+-      case VIDEO_COMMAND:
+-      case VIDEO_TRY_COMMAND:
+-              return ivtv_decoder_ioctls(filp, cmd, (void *)arg);
+-
+       default:
+               break;
+       }
+--- kernel-maemo-2.6.28.test.orig/drivers/media/video/saa7127.c
++++ kernel-maemo-2.6.28.test/drivers/media/video/saa7127.c
+@@ -149,7 +149,7 @@
+       { SAA7127_REG_COPYGEN_0,                        0x77 },
+       { SAA7127_REG_COPYGEN_1,                        0x41 },
+       { SAA7127_REG_COPYGEN_2,                        0x00 }, /* Macrovision enable/disable */
+-      { SAA7127_REG_OUTPUT_PORT_CONTROL,              0x9e },
++      { SAA7127_REG_OUTPUT_PORT_CONTROL,              0xbf },
+       { SAA7127_REG_GAIN_LUMINANCE_RGB,               0x00 },
+       { SAA7127_REG_GAIN_COLORDIFF_RGB,               0x00 },
+       { SAA7127_REG_INPUT_PORT_CONTROL_1,             0x80 }, /* for color bars */
+@@ -479,12 +479,18 @@
+               break;
+       case SAA7127_OUTPUT_TYPE_COMPOSITE:
+-              state->reg_2d = 0x08;   /* 00001000 CVBS only, RGB DAC's off (high impedance mode) */
++              if (state->ident == V4L2_IDENT_SAA7129)
++                      state->reg_2d = 0x20;   /* CVBS only */
++              else
++                      state->reg_2d = 0x08;   /* 00001000 CVBS only, RGB DAC's off (high impedance mode) */
+               state->reg_3a = 0x13;   /* by default switch YUV to RGB-matrix on */
+               break;
+       case SAA7127_OUTPUT_TYPE_SVIDEO:
+-              state->reg_2d = 0xff;   /* 11111111  croma -> R, luma -> CVBS + G + B */
++              if (state->ident == V4L2_IDENT_SAA7129)
++                      state->reg_2d = 0x18;   /* Y + C */
++              else
++                      state->reg_2d = 0xff;   /*11111111  croma -> R, luma -> CVBS + G + B */
+               state->reg_3a = 0x13;   /* by default switch YUV to RGB-matrix on */
+               break;
+@@ -499,7 +505,10 @@
+               break;
+       case SAA7127_OUTPUT_TYPE_BOTH:
+-              state->reg_2d = 0xbf;
++              if (state->ident == V4L2_IDENT_SAA7129)
++                      state->reg_2d = 0x38;
++              else
++                      state->reg_2d = 0xbf;
+               state->reg_3a = 0x13;   /* by default switch YUV to RGB-matrix on */
+               break;
+@@ -691,24 +700,6 @@
+       i2c_set_clientdata(client, state);
+-      /* Configure Encoder */
+-
+-      v4l_dbg(1, debug, client, "Configuring encoder\n");
+-      saa7127_write_inittab(client, saa7127_init_config_common);
+-      saa7127_set_std(client, V4L2_STD_NTSC);
+-      saa7127_set_output_type(client, SAA7127_OUTPUT_TYPE_BOTH);
+-      saa7127_set_vps(client, &vbi);
+-      saa7127_set_wss(client, &vbi);
+-      saa7127_set_cc(client, &vbi);
+-      saa7127_set_xds(client, &vbi);
+-      if (test_image == 1)
+-              /* The Encoder has an internal Colorbar generator */
+-              /* This can be used for debugging */
+-              saa7127_set_input_type(client, SAA7127_INPUT_TYPE_TEST_IMAGE);
+-      else
+-              saa7127_set_input_type(client, SAA7127_INPUT_TYPE_NORMAL);
+-      saa7127_set_video_enable(client, 1);
+-
+       if (id->driver_data) {  /* Chip type is already known */
+               state->ident = id->driver_data;
+       } else {                /* Needs detection */
+@@ -730,6 +721,23 @@
+       v4l_info(client, "%s found @ 0x%x (%s)\n", client->name,
+                       client->addr << 1, client->adapter->name);
++
++      v4l_dbg(1, debug, client, "Configuring encoder\n");
++      saa7127_write_inittab(client, saa7127_init_config_common);
++      saa7127_set_std(client, V4L2_STD_NTSC);
++      saa7127_set_output_type(client, SAA7127_OUTPUT_TYPE_BOTH);
++      saa7127_set_vps(client, &vbi);
++      saa7127_set_wss(client, &vbi);
++      saa7127_set_cc(client, &vbi);
++      saa7127_set_xds(client, &vbi);
++      if (test_image == 1)
++              /* The Encoder has an internal Colorbar generator */
++              /* This can be used for debugging */
++              saa7127_set_input_type(client, SAA7127_INPUT_TYPE_TEST_IMAGE);
++      else
++              saa7127_set_input_type(client, SAA7127_INPUT_TYPE_NORMAL);
++      saa7127_set_video_enable(client, 1);
++
+       if (state->ident == V4L2_IDENT_SAA7129)
+               saa7127_write_inittab(client, saa7129_init_config_extra);
+       return 0;
+--- kernel-maemo-2.6.28.test.orig/drivers/misc/acer-wmi.c
++++ kernel-maemo-2.6.28.test/drivers/misc/acer-wmi.c
+@@ -1297,7 +1297,7 @@
+       set_quirks();
+-      if (!acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) {
++      if (acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) {
+               interface->capability &= ~ACER_CAP_BRIGHTNESS;
+               printk(ACER_INFO "Brightness must be controlled by "
+                      "generic video driver\n");
+--- kernel-maemo-2.6.28.test.orig/drivers/misc/eeepc-laptop.c
++++ kernel-maemo-2.6.28.test/drivers/misc/eeepc-laptop.c
+@@ -161,6 +161,10 @@
+       {KE_KEY, 0x13, KEY_MUTE },
+       {KE_KEY, 0x14, KEY_VOLUMEDOWN },
+       {KE_KEY, 0x15, KEY_VOLUMEUP },
++      {KE_KEY, 0x1a, KEY_COFFEE },
++      {KE_KEY, 0x1b, KEY_ZOOM },
++      {KE_KEY, 0x1c, KEY_PROG2 },
++      {KE_KEY, 0x1d, KEY_PROG3 },
+       {KE_KEY, 0x30, KEY_SWITCHVIDEOMODE },
+       {KE_KEY, 0x31, KEY_SWITCHVIDEOMODE },
+       {KE_KEY, 0x32, KEY_SWITCHVIDEOMODE },
+@@ -510,7 +514,8 @@
+ static void notify_brn(void)
+ {
+       struct backlight_device *bd = eeepc_backlight_device;
+-      bd->props.brightness = read_brightness(bd);
++      if (bd)
++              bd->props.brightness = read_brightness(bd);
+ }
+ static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
+--- kernel-maemo-2.6.28.test.orig/drivers/misc/hpilo.c
++++ kernel-maemo-2.6.28.test/drivers/misc/hpilo.c
+@@ -710,6 +710,7 @@
+ static struct pci_device_id ilo_devices[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) },
++      { PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) },
+       { }
+ };
+ MODULE_DEVICE_TABLE(pci, ilo_devices);
+--- kernel-maemo-2.6.28.test.orig/drivers/misc/panasonic-laptop.c
++++ kernel-maemo-2.6.28.test/drivers/misc/panasonic-laptop.c
+@@ -515,7 +515,7 @@
+       hkey_num = result & 0xf;
+-      if (hkey_num < 0 || hkey_num > ARRAY_SIZE(pcc->keymap)) {
++      if (hkey_num < 0 || hkey_num >= ARRAY_SIZE(pcc->keymap)) {
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+                                 "hotkey number out of range: %d\n",
+                                 hkey_num));
+--- kernel-maemo-2.6.28.test.orig/drivers/misc/sgi-xp/xpc.h
++++ kernel-maemo-2.6.28.test/drivers/misc/sgi-xp/xpc.h
+@@ -3,7 +3,7 @@
+  * License.  See the file "COPYING" in the main directory of this archive
+  * for more details.
+  *
+- * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
++ * Copyright (c) 2004-2009 Silicon Graphics, Inc.  All Rights Reserved.
+  */
+ /*
+@@ -502,7 +502,8 @@
+                                               /* partition's notify mq */
+       struct xpc_send_msg_slot_uv *send_msg_slots;
+-      struct xpc_notify_mq_msg_uv *recv_msg_slots;
++      void *recv_msg_slots;   /* each slot will hold a xpc_notify_mq_msg_uv */
++                              /* structure plus the user's payload */
+       struct xpc_fifo_head_uv msg_slot_free_list;
+       struct xpc_fifo_head_uv recv_msg_list;  /* deliverable payloads */
+--- kernel-maemo-2.6.28.test.orig/drivers/misc/sgi-xp/xpc_sn2.c
++++ kernel-maemo-2.6.28.test/drivers/misc/sgi-xp/xpc_sn2.c
+@@ -904,7 +904,7 @@
+       dev_dbg(xpc_part, "  remote_vars_pa = 0x%016lx\n",
+               part_sn2->remote_vars_pa);
+-      part->last_heartbeat = remote_vars->heartbeat;
++      part->last_heartbeat = remote_vars->heartbeat - 1;
+       dev_dbg(xpc_part, "  last_heartbeat = 0x%016lx\n",
+               part->last_heartbeat);
+@@ -1841,6 +1841,7 @@
+                */
+               xpc_clear_remote_msgqueue_flags_sn2(ch);
++              smp_wmb(); /* ensure flags have been cleared before bte_copy */
+               ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;
+               dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
+@@ -1939,7 +1940,7 @@
+                       break;
+               get = ch_sn2->w_local_GP.get;
+-              rmb();  /* guarantee that .get loads before .put */
++              smp_rmb();      /* guarantee that .get loads before .put */
+               if (get == ch_sn2->w_remote_GP.put)
+                       break;
+@@ -1961,11 +1962,13 @@
+                       msg = xpc_pull_remote_msg_sn2(ch, get);
+-                      DBUG_ON(msg != NULL && msg->number != get);
+-                      DBUG_ON(msg != NULL && (msg->flags & XPC_M_SN2_DONE));
+-                      DBUG_ON(msg != NULL && !(msg->flags & XPC_M_SN2_READY));
++                      if (msg != NULL) {
++                              DBUG_ON(msg->number != get);
++                              DBUG_ON(msg->flags & XPC_M_SN2_DONE);
++                              DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
+-                      payload = &msg->payload;
++                              payload = &msg->payload;
++                      }
+                       break;
+               }
+@@ -2058,7 +2061,7 @@
+       while (1) {
+               put = ch_sn2->w_local_GP.put;
+-              rmb();  /* guarantee that .put loads before .get */
++              smp_rmb();      /* guarantee that .put loads before .get */
+               if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) {
+                       /* There are available message entries. We need to try
+@@ -2191,7 +2194,7 @@
+        * The preceding store of msg->flags must occur before the following
+        * load of local_GP->put.
+        */
+-      mb();
++      smp_mb();
+       /* see if the message is next in line to be sent, if so send it */
+@@ -2292,7 +2295,7 @@
+        * The preceding store of msg->flags must occur before the following
+        * load of local_GP->get.
+        */
+-      mb();
++      smp_mb();
+       /*
+        * See if this message is next in line to be acknowledged as having
+--- kernel-maemo-2.6.28.test.orig/drivers/misc/sgi-xp/xpc_uv.c
++++ kernel-maemo-2.6.28.test/drivers/misc/sgi-xp/xpc_uv.c
+@@ -3,7 +3,7 @@
+  * License.  See the file "COPYING" in the main directory of this archive
+  * for more details.
+  *
+- * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
++ * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
+  */
+ /*
+@@ -825,8 +825,8 @@
+                       continue;
+               for (entry = 0; entry < nentries; entry++) {
+-                      msg_slot = ch_uv->recv_msg_slots + entry *
+-                          ch->entry_size;
++                      msg_slot = ch_uv->recv_msg_slots +
++                          entry * ch->entry_size;
+                       msg_slot->hdr.msg_slot_number = entry;
+               }
+@@ -1123,9 +1123,8 @@
+       /* we're dealing with a normal message sent via the notify_mq */
+       ch_uv = &ch->sn.uv;
+-      msg_slot = (struct xpc_notify_mq_msg_uv *)((u64)ch_uv->recv_msg_slots +
+-                  (msg->hdr.msg_slot_number % ch->remote_nentries) *
+-                  ch->entry_size);
++      msg_slot = ch_uv->recv_msg_slots +
++          (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
+       BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
+       BUG_ON(msg_slot->hdr.size != 0);
+@@ -1238,7 +1237,7 @@
+               atomic_inc(&ch->n_to_notify);
+               msg_slot->key = key;
+-              wmb(); /* a non-NULL func must hit memory after the key */
++              smp_wmb(); /* a non-NULL func must hit memory after the key */
+               msg_slot->func = func;
+               if (ch->flags & XPC_C_DISCONNECTING) {
+--- kernel-maemo-2.6.28.test.orig/drivers/misc/thinkpad_acpi.c
++++ kernel-maemo-2.6.28.test/drivers/misc/thinkpad_acpi.c
+@@ -281,11 +281,17 @@
+ static struct workqueue_struct *tpacpi_wq;
++enum led_status_t {
++      TPACPI_LED_OFF = 0,
++      TPACPI_LED_ON,
++      TPACPI_LED_BLINK,
++};
++
+ /* Special LED class that can defer work */
+ struct tpacpi_led_classdev {
+       struct led_classdev led_classdev;
+       struct work_struct work;
+-      enum led_brightness new_brightness;
++      enum led_status_t new_state;
+       unsigned int led;
+ };
+@@ -3489,7 +3495,7 @@
+                       container_of(work, struct tpacpi_led_classdev, work);
+       if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
+-              light_set_status((data->new_brightness != LED_OFF));
++              light_set_status((data->new_state != TPACPI_LED_OFF));
+ }
+ static void light_sysfs_set(struct led_classdev *led_cdev,
+@@ -3499,7 +3505,8 @@
+               container_of(led_cdev,
+                            struct tpacpi_led_classdev,
+                            led_classdev);
+-      data->new_brightness = brightness;
++      data->new_state = (brightness != LED_OFF) ?
++                              TPACPI_LED_ON : TPACPI_LED_OFF;
+       queue_work(tpacpi_wq, &data->work);
+ }
+@@ -4006,12 +4013,6 @@
+       TPACPI_LED_EC_HLMS = 0x0e,      /* EC reg to select led to command */
+ };
+-enum led_status_t {
+-      TPACPI_LED_OFF = 0,
+-      TPACPI_LED_ON,
+-      TPACPI_LED_BLINK,
+-};
+-
+ static enum led_access_mode led_supported;
+ TPACPI_HANDLE(led, ec, "SLED",        /* 570 */
+@@ -4105,23 +4106,13 @@
+       return rc;
+ }
+-static void led_sysfs_set_status(unsigned int led,
+-                               enum led_brightness brightness)
+-{
+-      led_set_status(led,
+-                      (brightness == LED_OFF) ?
+-                      TPACPI_LED_OFF :
+-                      (tpacpi_led_state_cache[led] == TPACPI_LED_BLINK) ?
+-                              TPACPI_LED_BLINK : TPACPI_LED_ON);
+-}
+-
+ static void led_set_status_worker(struct work_struct *work)
+ {
+       struct tpacpi_led_classdev *data =
+               container_of(work, struct tpacpi_led_classdev, work);
+       if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
+-              led_sysfs_set_status(data->led, data->new_brightness);
++              led_set_status(data->led, data->new_state);
+ }
+ static void led_sysfs_set(struct led_classdev *led_cdev,
+@@ -4130,7 +4121,13 @@
+       struct tpacpi_led_classdev *data = container_of(led_cdev,
+                            struct tpacpi_led_classdev, led_classdev);
+-      data->new_brightness = brightness;
++      if (brightness == LED_OFF)
++              data->new_state = TPACPI_LED_OFF;
++      else if (tpacpi_led_state_cache[data->led] != TPACPI_LED_BLINK)
++              data->new_state = TPACPI_LED_ON;
++      else
++              data->new_state = TPACPI_LED_BLINK;
++
+       queue_work(tpacpi_wq, &data->work);
+ }
+@@ -4148,7 +4145,7 @@
+       } else if ((*delay_on != 500) || (*delay_off != 500))
+               return -EINVAL;
+-      data->new_brightness = TPACPI_LED_BLINK;
++      data->new_state = TPACPI_LED_BLINK;
+       queue_work(tpacpi_wq, &data->work);
+       return 0;
+@@ -6927,7 +6924,7 @@
+  * if it is not there yet.
+  */
+ #define IBM_BIOS_MODULE_ALIAS(__type) \
+-      MODULE_ALIAS("dmi:bvnIBM:bvr" __type "ET??WW")
++      MODULE_ALIAS("dmi:bvnIBM:bvr" __type "ET??WW*")
+ /* Non-ancient thinkpads */
+ MODULE_ALIAS("dmi:bvnIBM:*:svnIBM:*:pvrThinkPad*:rvnIBM:*");
+@@ -6936,9 +6933,9 @@
+ /* Ancient thinkpad BIOSes have to be identified by
+  * BIOS type or model number, and there are far less
+  * BIOS types than model numbers... */
+-IBM_BIOS_MODULE_ALIAS("I[B,D,H,I,M,N,O,T,W,V,Y,Z]");
+-IBM_BIOS_MODULE_ALIAS("1[0,3,6,8,A-G,I,K,M-P,S,T]");
+-IBM_BIOS_MODULE_ALIAS("K[U,X-Z]");
++IBM_BIOS_MODULE_ALIAS("I[BDHIMNOTWVYZ]");
++IBM_BIOS_MODULE_ALIAS("1[0368A-GIKM-PST]");
++IBM_BIOS_MODULE_ALIAS("K[UX-Z]");
+ MODULE_AUTHOR("Borislav Deianov, Henrique de Moraes Holschuh");
+ MODULE_DESCRIPTION(TPACPI_DESC);
+--- kernel-maemo-2.6.28.test.orig/drivers/mmc/card/mmc_test.c
++++ kernel-maemo-2.6.28.test/drivers/mmc/card/mmc_test.c
+@@ -494,7 +494,7 @@
+       sg_init_one(&sg, test->buffer, 512);
+-      ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
++      ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
+       if (ret)
+               return ret;
+--- kernel-maemo-2.6.28.test.orig/drivers/mmc/host/s3cmci.c
++++ kernel-maemo-2.6.28.test/drivers/mmc/host/s3cmci.c
+@@ -329,7 +329,7 @@
+       to_ptr = host->base + host->sdidata;
+-      while ((fifo = fifo_free(host))) {
++      while ((fifo = fifo_free(host)) > 3) {
+               if (!host->pio_bytes) {
+                       res = get_data_buffer(host, &host->pio_bytes,
+                                                       &host->pio_ptr);
+@@ -793,8 +793,7 @@
+                             host->mem->start + host->sdidata);
+       if (!setup_ok) {
+-              s3c2410_dma_config(host->dma, 4,
+-                      (S3C2410_DCON_HWTRIG | S3C2410_DCON_CH0_SDI));
++              s3c2410_dma_config(host->dma, 4, 0);
+               s3c2410_dma_set_buffdone_fn(host->dma,
+                                           s3cmci_dma_done_callback);
+               s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART);
+--- kernel-maemo-2.6.28.test.orig/drivers/mmc/host/sdhci-pci.c
++++ kernel-maemo-2.6.28.test/drivers/mmc/host/sdhci-pci.c
+@@ -107,6 +107,7 @@
+ static const struct sdhci_pci_fixes sdhci_cafe = {
+       .quirks         = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
++                        SDHCI_QUIRK_NO_BUSY_IRQ |
+                         SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ };
+--- kernel-maemo-2.6.28.test.orig/drivers/mmc/host/sdhci.c
++++ kernel-maemo-2.6.28.test/drivers/mmc/host/sdhci.c
+@@ -1286,8 +1286,11 @@
+               if (host->cmd->data)
+                       DBG("Cannot wait for busy signal when also "
+                               "doing a data transfer");
+-              else
++              else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
+                       return;
++
++              /* The controller does not support the end-of-busy IRQ,
++               * fall through and take the SDHCI_INT_RESPONSE */
+       }
+       if (intmask & SDHCI_INT_RESPONSE)
+@@ -1718,7 +1721,9 @@
+ #endif
+ #ifdef CONFIG_LEDS_CLASS
+-      host->led.name = mmc_hostname(mmc);
++      snprintf(host->led_name, sizeof(host->led_name),
++              "%s::", mmc_hostname(mmc));
++      host->led.name = host->led_name;
+       host->led.brightness = LED_OFF;
+       host->led.default_trigger = mmc_hostname(mmc);
+       host->led.brightness_set = sdhci_led_control;
+--- kernel-maemo-2.6.28.test.orig/drivers/mmc/host/sdhci.h
++++ kernel-maemo-2.6.28.test/drivers/mmc/host/sdhci.h
+@@ -210,6 +210,8 @@
+ #define SDHCI_QUIRK_BROKEN_SMALL_PIO                  (1<<13)
+ /* Controller supports high speed but doesn't have the caps bit set */
+ #define SDHCI_QUIRK_FORCE_HIGHSPEED                   (1<<14)
++/* Controller does not provide transfer-complete interrupt when not busy */
++#define SDHCI_QUIRK_NO_BUSY_IRQ                               (1<<15)
+       int                     irq;            /* Device IRQ */
+       void __iomem *          ioaddr;         /* Mapped address */
+@@ -222,6 +224,7 @@
+ #ifdef CONFIG_LEDS_CLASS
+       struct led_classdev     led;            /* LED control */
++      char   led_name[32];
+ #endif
+       spinlock_t              lock;           /* Mutex */
+--- kernel-maemo-2.6.28.test.orig/drivers/mtd/devices/mtd_dataflash.c
++++ kernel-maemo-2.6.28.test/drivers/mtd/devices/mtd_dataflash.c
+@@ -815,7 +815,8 @@
+                                       if (!(info->flags & IS_POW2PS))
+                                               return info;
+                               }
+-                      }
++                      } else
++                              return info;
+               }
+       }
+--- kernel-maemo-2.6.28.test.orig/drivers/net/3c505.c
++++ kernel-maemo-2.6.28.test/drivers/net/3c505.c
+@@ -493,21 +493,27 @@
+       }
+       /* read the data */
+       spin_lock_irqsave(&adapter->lock, flags);
+-      i = 0;
+-      do {
+-              j = 0;
+-              while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && j++ < 20000);
+-              pcb->data.raw[i++] = inb_command(dev->base_addr);
+-              if (i > MAX_PCB_DATA)
+-                      INVALID_PCB_MSG(i);
+-      } while ((stat & ASF_PCB_MASK) != ASF_PCB_END && j < 20000);
++      for (i = 0; i < MAX_PCB_DATA; i++) {
++              for (j = 0; j < 20000; j++) {
++                      stat = get_status(dev->base_addr);
++                      if (stat & ACRF)
++                              break;
++              }
++              pcb->data.raw[i] = inb_command(dev->base_addr);
++              if ((stat & ASF_PCB_MASK) == ASF_PCB_END || j >= 20000)
++                      break;
++      }
+       spin_unlock_irqrestore(&adapter->lock, flags);
++      if (i >= MAX_PCB_DATA) {
++              INVALID_PCB_MSG(i);
++              return false;
++      }
+       if (j >= 20000) {
+               TIMEOUT_MSG(__LINE__);
+               return false;
+       }
+-      /* woops, the last "data" byte was really the length! */
+-      total_length = pcb->data.raw[--i];
++      /* the last "data" byte was really the length! */
++      total_length = pcb->data.raw[i];
+       /* safety check total length vs data length */
+       if (total_length != (pcb->length + 2)) {
+--- kernel-maemo-2.6.28.test.orig/drivers/net/b44.c
++++ kernel-maemo-2.6.28.test/drivers/net/b44.c
+@@ -750,7 +750,7 @@
+                                            dest_idx * sizeof(dest_desc),
+                                            DMA_BIDIRECTIONAL);
+-      ssb_dma_sync_single_for_device(bp->sdev, le32_to_cpu(src_desc->addr),
++      ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
+                                      RX_PKT_BUF_SZ,
+                                      DMA_FROM_DEVICE);
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/net/bnx2x_main.c
++++ kernel-maemo-2.6.28.test/drivers/net/bnx2x_main.c
+@@ -8079,6 +8079,9 @@
+       struct bnx2x *bp = netdev_priv(dev);
+       int rc;
++      if (!netif_running(dev))
++              return -EAGAIN;
++
+       DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+          DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
+          eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+--- kernel-maemo-2.6.28.test.orig/drivers/net/bonding/bond_main.c
++++ kernel-maemo-2.6.28.test/drivers/net/bonding/bond_main.c
+@@ -3536,11 +3536,26 @@
+               }
+               break;
+       case NETDEV_CHANGE:
+-              /*
+-               * TODO: is this what we get if somebody
+-               * sets up a hierarchical bond, then rmmod's
+-               * one of the slave bonding devices?
+-               */
++              if (bond->params.mode == BOND_MODE_8023AD || bond_is_lb(bond)) {
++                      struct slave *slave;
++
++                      slave = bond_get_slave_by_dev(bond, slave_dev);
++                      if (slave) {
++                              u16 old_speed = slave->speed;
++                              u16 old_duplex = slave->duplex;
++
++                              bond_update_speed_duplex(slave);
++
++                              if (bond_is_lb(bond))
++                                      break;
++
++                              if (old_speed != slave->speed)
++                                      bond_3ad_adapter_speed_changed(slave);
++                              if (old_duplex != slave->duplex)
++                                      bond_3ad_adapter_duplex_changed(slave);
++                      }
++              }
++
+               break;
+       case NETDEV_DOWN:
+               /*
+--- kernel-maemo-2.6.28.test.orig/drivers/net/bonding/bonding.h
++++ kernel-maemo-2.6.28.test/drivers/net/bonding/bonding.h
+@@ -248,6 +248,12 @@
+       return (struct bonding *)slave->dev->master->priv;
+ }
++static inline bool bond_is_lb(const struct bonding *bond)
++{
++        return bond->params.mode == BOND_MODE_TLB
++                || bond->params.mode == BOND_MODE_ALB;
++}
++
+ #define BOND_FOM_NONE                 0
+ #define BOND_FOM_ACTIVE                       1
+ #define BOND_FOM_FOLLOW                       2
+--- kernel-maemo-2.6.28.test.orig/drivers/net/e1000/e1000_main.c
++++ kernel-maemo-2.6.28.test/drivers/net/e1000/e1000_main.c
+@@ -31,7 +31,7 @@
+ char e1000_driver_name[] = "e1000";
+ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
+-#define DRV_VERSION "7.3.20-k3-NAPI"
++#define DRV_VERSION "7.3.21-k3-NAPI"
+ const char e1000_driver_version[] = DRV_VERSION;
+ static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
+@@ -921,7 +921,7 @@
+               err = pci_enable_device(pdev);
+       } else {
+               bars = pci_select_bars(pdev, IORESOURCE_MEM);
+-              err = pci_enable_device(pdev);
++              err = pci_enable_device_mem(pdev);
+       }
+       if (err)
+               return err;
+@@ -3732,7 +3732,7 @@
+       struct e1000_hw *hw = &adapter->hw;
+       u32 rctl, icr = er32(ICR);
+-      if (unlikely(!icr))
++      if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags)))
+               return IRQ_NONE;  /* Not our interrupt */
+       /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+--- kernel-maemo-2.6.28.test.orig/drivers/net/irda/irda-usb.c
++++ kernel-maemo-2.6.28.test/drivers/net/irda/irda-usb.c
+@@ -1075,7 +1075,7 @@
+ {
+       unsigned int i;
+       int ret;
+-      char stir421x_fw_name[11];
++      char stir421x_fw_name[12];
+       const struct firmware *fw;
+       const unsigned char *fw_version_ptr; /* pointer to version string */
+       unsigned long fw_version = 0;
+--- kernel-maemo-2.6.28.test.orig/drivers/net/r6040.c
++++ kernel-maemo-2.6.28.test/drivers/net/r6040.c
+@@ -49,8 +49,8 @@
+ #include <asm/processor.h>
+ #define DRV_NAME      "r6040"
+-#define DRV_VERSION   "0.18"
+-#define DRV_RELDATE   "13Jul2008"
++#define DRV_VERSION   "0.19"
++#define DRV_RELDATE   "18Dec2008"
+ /* PHY CHIP Address */
+ #define PHY1_ADDR     1       /* For MAC1 */
+@@ -214,7 +214,7 @@
+       /* Wait for the read bit to be cleared */
+       while (limit--) {
+               cmd = ioread16(ioaddr + MMDIO);
+-              if (cmd & MDIO_READ)
++              if (!(cmd & MDIO_READ))
+                       break;
+       }
+@@ -233,7 +233,7 @@
+       /* Wait for the write bit to be cleared */
+       while (limit--) {
+               cmd = ioread16(ioaddr + MMDIO);
+-              if (cmd & MDIO_WRITE)
++              if (!(cmd & MDIO_WRITE))
+                       break;
+       }
+ }
+@@ -681,8 +681,10 @@
+       struct net_device *dev = dev_id;
+       struct r6040_private *lp = netdev_priv(dev);
+       void __iomem *ioaddr = lp->base;
+-      u16 status;
++      u16 misr, status;
++      /* Save MIER */
++      misr = ioread16(ioaddr + MIER);
+       /* Mask off RDC MAC interrupt */
+       iowrite16(MSK_INT, ioaddr + MIER);
+       /* Read MISR status and clear */
+@@ -702,7 +704,7 @@
+                       dev->stats.rx_fifo_errors++;
+               /* Mask off RX interrupt */
+-              iowrite16(ioread16(ioaddr + MIER) & ~RX_INTS, ioaddr + MIER);
++              misr &= ~RX_INTS;
+               netif_rx_schedule(dev, &lp->napi);
+       }
+@@ -710,6 +712,9 @@
+       if (status & TX_INTS)
+               r6040_tx(dev);
++      /* Restore RDC MAC interrupt */
++      iowrite16(misr, ioaddr + MIER);
++
+       return IRQ_HANDLED;
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/net/r8169.c
++++ kernel-maemo-2.6.28.test/drivers/net/r8169.c
+@@ -2026,8 +2026,7 @@
+       if (!tp->pcie_cap && netif_msg_probe(tp))
+               dev_info(&pdev->dev, "no PCI Express capability\n");
+-      /* Unneeded ? Don't mess with Mrs. Murphy. */
+-      rtl8169_irq_mask_and_ack(ioaddr);
++      RTL_W16(IntrMask, 0x0000);
+       /* Soft reset the chip. */
+       RTL_W8(ChipCmd, CmdReset);
+@@ -2039,6 +2038,8 @@
+               msleep_interruptible(1);
+       }
++      RTL_W16(IntrStatus, 0xffff);
++
+       /* Identify chip attached to board */
+       rtl8169_get_mac_version(tp, ioaddr);
+--- kernel-maemo-2.6.28.test.orig/drivers/net/skfp/skfddi.c
++++ kernel-maemo-2.6.28.test/drivers/net/skfp/skfddi.c
+@@ -998,9 +998,9 @@
+               break;
+       case SKFP_CLR_STATS:    /* Zero out the driver statistics */
+               if (!capable(CAP_NET_ADMIN)) {
+-                      memset(&lp->MacStat, 0, sizeof(lp->MacStat));
+-              } else {
+                       status = -EPERM;
++              } else {
++                      memset(&lp->MacStat, 0, sizeof(lp->MacStat));
+               }
+               break;
+       default:
+--- kernel-maemo-2.6.28.test.orig/drivers/net/sky2.c
++++ kernel-maemo-2.6.28.test/drivers/net/sky2.c
+@@ -1403,9 +1403,6 @@
+       }
+-      if (netif_msg_ifup(sky2))
+-              printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
+-
+       netif_carrier_off(dev);
+       /* must be power of 2 */
+@@ -1484,6 +1481,9 @@
+       sky2_write32(hw, B0_IMSK, imask);
+       sky2_set_multicast(dev);
++
++      if (netif_msg_ifup(sky2))
++              printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
+       return 0;
+ err_out:
+--- kernel-maemo-2.6.28.test.orig/drivers/net/sungem.c
++++ kernel-maemo-2.6.28.test/drivers/net/sungem.c
+@@ -2222,6 +2222,8 @@
+       gp->running = 1;
++      napi_enable(&gp->napi);
++
+       if (gp->lstate == link_up) {
+               netif_carrier_on(gp->dev);
+               gem_set_link_modes(gp);
+@@ -2239,6 +2241,8 @@
+               spin_lock_irqsave(&gp->lock, flags);
+               spin_lock(&gp->tx_lock);
++              napi_disable(&gp->napi);
++
+               gp->running =  0;
+               gem_reset(gp);
+               gem_clean_rings(gp);
+@@ -2339,8 +2343,6 @@
+       if (!gp->asleep)
+               rc = gem_do_start(dev);
+       gp->opened = (rc == 0);
+-      if (gp->opened)
+-              napi_enable(&gp->napi);
+       mutex_unlock(&gp->pm_mutex);
+@@ -2477,8 +2479,6 @@
+               /* Re-attach net device */
+               netif_device_attach(dev);
+-
+-              napi_enable(&gp->napi);
+       }
+       spin_lock_irqsave(&gp->lock, flags);
+--- kernel-maemo-2.6.28.test.orig/drivers/net/tun.c
++++ kernel-maemo-2.6.28.test/drivers/net/tun.c
+@@ -157,10 +157,16 @@
+       nexact = n;
+-      /* The rest is hashed */
++      /* Remaining multicast addresses are hashed,
++       * unicast will leave the filter disabled. */
+       memset(filter->mask, 0, sizeof(filter->mask));
+-      for (; n < uf.count; n++)
++      for (; n < uf.count; n++) {
++              if (!is_multicast_ether_addr(addr[n].u)) {
++                      err = 0; /* no filter */
++                      goto done;
++              }
+               addr_hash_set(filter->mask, addr[n].u);
++      }
+       /* For ALLMULTI just set the mask to all ones.
+        * This overrides the mask populated above. */
+--- kernel-maemo-2.6.28.test.orig/drivers/net/usb/asix.c
++++ kernel-maemo-2.6.28.test/drivers/net/usb/asix.c
+@@ -1450,6 +1450,14 @@
+       // Cables-to-Go USB Ethernet Adapter
+       USB_DEVICE(0x0b95, 0x772a),
+       .driver_info = (unsigned long) &ax88772_info,
++}, {
++      // ABOCOM for pci
++      USB_DEVICE(0x14ea, 0xab11),
++      .driver_info = (unsigned long) &ax88178_info,
++}, {
++      // ASIX 88772a
++      USB_DEVICE(0x0db0, 0xa877),
++      .driver_info = (unsigned long) &ax88772_info,
+ },
+       { },            // END
+ };
+--- kernel-maemo-2.6.28.test.orig/drivers/net/usb/cdc_ether.c
++++ kernel-maemo-2.6.28.test/drivers/net/usb/cdc_ether.c
+@@ -559,6 +559,11 @@
+       USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
+                       USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long) &cdc_info,
++}, {
++      /* Ericsson F3507g */
++      USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
++                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
++      .driver_info = (unsigned long) &cdc_info,
+ },
+       { },            // END
+ };
+--- kernel-maemo-2.6.28.test.orig/drivers/net/usb/zaurus.c
++++ kernel-maemo-2.6.28.test/drivers/net/usb/zaurus.c
+@@ -341,6 +341,11 @@
+       USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
+                       USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long) &bogus_mdlm_info,
++}, {
++      /* Motorola MOTOMAGX phones */
++      USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6425, USB_CLASS_COMM,
++                      USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
++      .driver_info = (unsigned long) &bogus_mdlm_info,
+ },
+ /* Olympus has some models with a Zaurus-compatible option.
+--- kernel-maemo-2.6.28.test.orig/drivers/net/virtio_net.c
++++ kernel-maemo-2.6.28.test/drivers/net/virtio_net.c
+@@ -24,6 +24,7 @@
+ #include <linux/virtio.h>
+ #include <linux/virtio_net.h>
+ #include <linux/scatterlist.h>
++#include <linux/if_vlan.h>
+ static int napi_weight = 128;
+ module_param(napi_weight, int, 0444);
+@@ -33,7 +34,7 @@
+ module_param(gso, bool, 0444);
+ /* FIXME: MTU in config. */
+-#define MAX_PACKET_LEN (ETH_HLEN+ETH_DATA_LEN)
++#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
+ struct virtnet_info
+ {
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/ath5k/base.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/ath5k/base.c
+@@ -2157,7 +2157,8 @@
+       if (sc->opmode == NL80211_IFTYPE_STATION) {
+               sc->imask |= AR5K_INT_BMISS;
+-      } else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
++      } else if (sc->opmode == NL80211_IFTYPE_ADHOC ||
++                 sc->opmode == NL80211_IFTYPE_MESH_POINT) {
+               /*
+                * In IBSS mode we use a self-linked tx descriptor and let the
+                * hardware send the beacons automatically. We have to load it
+@@ -2748,6 +2749,7 @@
+       switch (conf->type) {
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_ADHOC:
++      case NL80211_IFTYPE_MESH_POINT:
+       case NL80211_IFTYPE_MONITOR:
+               sc->opmode = conf->type;
+               break;
+@@ -2819,7 +2821,8 @@
+       }
+       if (conf->changed & IEEE80211_IFCC_BEACON &&
+-          vif->type == NL80211_IFTYPE_ADHOC) {
++          (vif->type == NL80211_IFTYPE_ADHOC ||
++           vif->type == NL80211_IFTYPE_MESH_POINT)) {
+               struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+               if (!beacon) {
+                       ret = -ENOMEM;
+@@ -2951,6 +2954,9 @@
+               sc->opmode == NL80211_IFTYPE_ADHOC) {
+               rfilt |= AR5K_RX_FILTER_BEACON;
+       }
++      if (sc->opmode == NL80211_IFTYPE_MESH_POINT)
++              rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON |
++                      AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM;
+       /* Set filters */
+       ath5k_hw_set_rx_filter(ah,rfilt);
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/ath5k/phy.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/ath5k/phy.c
+@@ -2195,9 +2195,7 @@
+               return ret;
+       }
+-      ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
+-      if (ret)
+-              return ret;
++      ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
+       /*
+        * Re-enable RX/TX and beacons
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/ath5k/reset.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/ath5k/reset.c
+@@ -842,9 +842,7 @@
+        *
+        * XXX: Find an interval that's OK for all cards...
+        */
+-      ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
+-      if (ret)
+-              return ret;
++      ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
+       /*
+        * Reset queues and start beacon timers at the end of the reset routine
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/ath9k/ath9k.h
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/ath9k/ath9k.h
+@@ -590,8 +590,8 @@
+       u8 iso[3];
+ };
+-#define REG_WRITE(_ah, _reg, _val) iowrite32(_val, _ah->ah_sh + _reg)
+-#define REG_READ(_ah, _reg) ioread32(_ah->ah_sh + _reg)
++#define REG_WRITE(_ah, _reg, _val) ath9k_iowrite32((_ah), (_reg), (_val))
++#define REG_READ(_ah, _reg) ath9k_ioread32((_ah), (_reg))
+ #define SM(_v, _f)  (((_v) << _f##_S) & _f)
+ #define MS(_v, _f)  (((_v) & _f) >> _f##_S)
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/ath9k/core.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/ath9k/core.c
+@@ -1089,6 +1089,7 @@
+       sc->sc_cachelsz = csz << 2;     /* convert to bytes */
+       spin_lock_init(&sc->sc_resetlock);
++      spin_lock_init(&sc->sc_serial_rw);
+       ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
+       if (ah == NULL) {
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/ath9k/core.h
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/ath9k/core.h
+@@ -1040,6 +1040,7 @@
+       spinlock_t sc_rxbuflock;
+       spinlock_t sc_txbuflock;
+       spinlock_t sc_resetlock;
++      spinlock_t sc_serial_rw;
+       spinlock_t node_lock;
+       /* LEDs */
+@@ -1081,4 +1082,36 @@
+       struct ath9k_country_entry *ctry);
+ u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp);
++/*
++ * Read and write, they both share the same lock. We do this to serialize
++ * reads and writes on Atheros 802.11n PCI devices only. This is required
++ * as the FIFO on these devices can only accept sanely 2 requests. After
++ * that the device goes bananas. Serializing the reads/writes prevents this
++ * from happening.
++ */
++
++static inline void ath9k_iowrite32(struct ath_hal *ah, u32 reg_offset, u32 val)
++{
++      if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
++              unsigned long flags;
++              spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
++              iowrite32(val, ah->ah_sc->mem + reg_offset);
++              spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
++      } else
++              iowrite32(val, ah->ah_sc->mem + reg_offset);
++}
++
++static inline unsigned int ath9k_ioread32(struct ath_hal *ah, u32 reg_offset)
++{
++      u32 val;
++      if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
++              unsigned long flags;
++              spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
++              val = ioread32(ah->ah_sc->mem + reg_offset);
++              spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
++      } else
++              val = ioread32(ah->ah_sc->mem + reg_offset);
++      return val;
++}
++
+ #endif /* CORE_H */
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/ath9k/hw.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/ath9k/hw.c
+@@ -346,6 +346,25 @@
+       }
+       ah->ah_config.intr_mitigation = 0;
++
++      /*
++       * We need this for PCI devices only (Cardbus, PCI, miniPCI)
++       * _and_ if on non-uniprocessor systems (Multiprocessor/HT).
++       * This means we use it for all AR5416 devices, and the few
++       * minor PCI AR9280 devices out there.
++       *
++       * Serialization is required because these devices do not handle
++       * well the case of two concurrent reads/writes due to the latency
++       * involved. During one read/write another read/write can be issued
++       * on another CPU while the previous read/write may still be working
++       * on our hardware, if we hit this case the hardware poops in a loop.
++       * We prevent this by serializing reads and writes.
++       *
++       * This issue is not present on PCI-Express devices or pre-AR5416
++       * devices (legacy, 802.11abg).
++       */
++       if (num_possible_cpus() > 1)
++               ah->ah_config.serialize_regmode = SER_REG_MODE_AUTO;
+ }
+ static void ath9k_hw_override_ini(struct ath_hal *ah,
+@@ -3292,7 +3311,8 @@
+       }
+       if (ah->ah_config.serialize_regmode == SER_REG_MODE_AUTO) {
+-              if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) {
++              if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCI ||
++                  (AR_SREV_9280(ah) && !ah->ah_isPciExpress)) {
+                       ah->ah_config.serialize_regmode =
+                               SER_REG_MODE_ON;
+               } else {
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/ath9k/recv.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/ath9k/recv.c
+@@ -627,9 +627,8 @@
+               rfilt &= ~ATH9K_RX_FILTER_UCAST;
+       }
+-      if (((sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
+-           (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)) ||
+-          (sc->sc_ah->ah_opmode == ATH9K_M_IBSS))
++      if (sc->sc_ah->ah_opmode == ATH9K_M_STA ||
++                      sc->sc_ah->ah_opmode == ATH9K_M_IBSS)
+               rfilt |= ATH9K_RX_FILTER_BEACON;
+       /* If in HOSTAP mode, want to enable reception of PSPOLL frames
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/b43/xmit.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/b43/xmit.c
+@@ -51,7 +51,7 @@
+ }
+ /* Extract the bitrate index out of an OFDM PLCP header. */
+-static u8 b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy)
++static int b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy)
+ {
+       int base = aphy ? 0 : 4;
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/ipw2200.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/ipw2200.c
+@@ -4347,7 +4347,8 @@
+               return;
+       }
+-      if (priv->status & STATUS_SCANNING) {
++      if (priv->status & STATUS_SCANNING &&
++          missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
+               /* Stop scan to keep fw from getting
+                * stuck (only if we aren't roaming --
+                * otherwise we'll never scan more than 2 or 3
+@@ -6277,6 +6278,20 @@
+       }
+ }
++static int ipw_passive_dwell_time(struct ipw_priv *priv)
++{
++      /* While associated, staying on passive channels longer than the DTIM
++       * interval during a scan causes the firmware to cancel the scan
++       * without notification. Hence, don't stay on passive channels longer
++       * than the beacon interval.
++       */
++      if (priv->status & STATUS_ASSOCIATED
++          && priv->assoc_network->beacon_interval > 10)
++              return priv->assoc_network->beacon_interval - 10;
++      else
++              return 120;
++}
++
+ static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
+ {
+       struct ipw_scan_request_ext scan;
+@@ -6320,16 +6335,16 @@
+       scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
+       if (type == IW_SCAN_TYPE_PASSIVE) {
+-              IPW_DEBUG_WX("use passive scanning\n");
+-              scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
++              IPW_DEBUG_WX("use passive scanning\n");
++              scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
+               scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
+-                      cpu_to_le16(120);
++                      cpu_to_le16(ipw_passive_dwell_time(priv));
+               ipw_add_scan_channels(priv, &scan, scan_type);
+               goto send_request;
+       }
+       /* Use active scan by default. */
+-      if (priv->config & CFG_SPEED_SCAN)
++      if (priv->config & CFG_SPEED_SCAN)
+               scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
+                       cpu_to_le16(30);
+       else
+@@ -6339,7 +6354,8 @@
+       scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
+               cpu_to_le16(20);
+-      scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
++      scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
++              cpu_to_le16(ipw_passive_dwell_time(priv));
+       scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
+ #ifdef CONFIG_IPW2200_MONITOR
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/ipw2200.h
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/ipw2200.h
+@@ -244,6 +244,7 @@
+ #define HOST_NOTIFICATION_S36_MEASUREMENT_REFUSED       31
+ #define HOST_NOTIFICATION_STATUS_BEACON_MISSING         1
++#define IPW_MB_SCAN_CANCEL_THRESHOLD                    3
+ #define IPW_MB_ROAMING_THRESHOLD_MIN                    1
+ #define IPW_MB_ROAMING_THRESHOLD_DEFAULT                8
+ #define IPW_MB_ROAMING_THRESHOLD_MAX                    30
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+@@ -647,12 +647,16 @@
+       s8 scale_action = 0;
+       unsigned long flags;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+-      u16 fc, rate_mask;
++      u16 fc;
++      u16 rate_mask = 0;
+       struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r;
+       DECLARE_MAC_BUF(mac);
+       IWL_DEBUG_RATE("enter\n");
++      if (sta)
++              rate_mask = sta->supp_rates[sband->band];
++
+       /* Send management frames and broadcast/multicast data using lowest
+        * rate. */
+       fc = le16_to_cpu(hdr->frame_control);
+@@ -660,11 +664,13 @@
+           is_multicast_ether_addr(hdr->addr1) ||
+           !sta || !priv_sta) {
+               IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
+-              sel->rate_idx = rate_lowest_index(sband, sta);
++              if (!rate_mask)
++                      sel->rate_idx = rate_lowest_index(sband, NULL);
++              else
++                      sel->rate_idx = rate_lowest_index(sband, sta);
+               return;
+       }
+-      rate_mask = sta->supp_rates[sband->band];
+       index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1);
+       if (sband->band == IEEE80211_BAND_5GHZ)
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+@@ -951,7 +951,8 @@
+       }
+       /* See if there's a better rate or modulation mode to try. */
+-      rs_rate_scale_perform(priv, hdr, sta, lq_sta);
++      if (sta && sta->supp_rates[sband->band])
++              rs_rate_scale_perform(priv, hdr, sta, lq_sta);
+ out:
+       return;
+ }
+@@ -2114,15 +2115,22 @@
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       __le16 fc;
+       struct iwl_lq_sta *lq_sta;
++      u64 mask_bit = 0;
+       IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n");
++      if (sta)
++              mask_bit = sta->supp_rates[sband->band];
++
+       /* Send management frames and broadcast/multicast data using lowest
+        * rate. */
+       fc = hdr->frame_control;
+       if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) ||
+           !sta || !priv_sta) {
+-              sel->rate_idx = rate_lowest_index(sband, sta);
++              if (!mask_bit)
++                      sel->rate_idx = rate_lowest_index(sband, NULL);
++              else
++                      sel->rate_idx = rate_lowest_index(sband, sta);
+               return;
+       }
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -1334,16 +1334,6 @@
+       priv->cfg->ops->lib->rx_handler_setup(priv);
+ }
+-/*
+- * this should be called while priv->lock is locked
+-*/
+-static void __iwl_rx_replenish(struct iwl_priv *priv)
+-{
+-      iwl_rx_allocate(priv);
+-      iwl_rx_queue_restock(priv);
+-}
+-
+-
+ /**
+  * iwl_rx_handle - Main entry function for receiving responses from uCode
+  *
+@@ -1451,7 +1441,7 @@
+                       count++;
+                       if (count >= 8) {
+                               priv->rxq.read = i;
+-                              __iwl_rx_replenish(priv);
++                              iwl_rx_queue_restock(priv);
+                               count = 0;
+                       }
+               }
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/iwlwifi/iwl-rx.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/iwlwifi/iwl-rx.c
+@@ -245,25 +245,31 @@
+       struct list_head *element;
+       struct iwl_rx_mem_buffer *rxb;
+       unsigned long flags;
+-      spin_lock_irqsave(&rxq->lock, flags);
+-      while (!list_empty(&rxq->rx_used)) {
++
++      while (1) {
++              spin_lock_irqsave(&rxq->lock, flags);
++
++              if (list_empty(&rxq->rx_used)) {
++                      spin_unlock_irqrestore(&rxq->lock, flags);
++                      return;
++              }
+               element = rxq->rx_used.next;
+               rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
++              list_del(element);
++
++              spin_unlock_irqrestore(&rxq->lock, flags);
+               /* Alloc a new receive buffer */
+               rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
+-                              __GFP_NOWARN | GFP_ATOMIC);
++                                   GFP_KERNEL);
+               if (!rxb->skb) {
+-                      if (net_ratelimit())
+-                              printk(KERN_CRIT DRV_NAME
+-                                     ": Can not allocate SKB buffers\n");
++                      printk(KERN_CRIT DRV_NAME
++                                 "Can not allocate SKB buffers\n");
+                       /* We don't reschedule replenish work here -- we will
+                        * call the restock method and if it still needs
+                        * more buffers it will schedule replenish */
+                       break;
+               }
+-              priv->alloc_rxb_skb++;
+-              list_del(element);
+               /* Get physical address of RB/SKB */
+               rxb->real_dma_addr = pci_map_single(
+@@ -277,12 +283,15 @@
+               rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
+               skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
++              spin_lock_irqsave(&rxq->lock, flags);
++
+               list_add_tail(&rxb->list, &rxq->rx_free);
+               rxq->free_count++;
++              priv->alloc_rxb_skb++;
++
++              spin_unlock_irqrestore(&rxq->lock, flags);
+       }
+-      spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+-EXPORT_SYMBOL(iwl_rx_allocate);
+ void iwl_rx_replenish(struct iwl_priv *priv)
+ {
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/orinoco.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/orinoco.c
+@@ -4938,32 +4938,29 @@
+       struct orinoco_private *priv = netdev_priv(dev);
+       u8 *buf;
+       unsigned long flags;
+-      int err = 0;
+       if ((wrqu->data.length > MAX_WPA_IE_LEN) ||
+           (wrqu->data.length && (extra == NULL)))
+               return -EINVAL;
+-      if (orinoco_lock(priv, &flags) != 0)
+-              return -EBUSY;
+-
+       if (wrqu->data.length) {
+               buf = kmalloc(wrqu->data.length, GFP_KERNEL);
+-              if (buf == NULL) {
+-                      err = -ENOMEM;
+-                      goto out;
+-              }
++              if (buf == NULL)
++                      return -ENOMEM;
+               memcpy(buf, extra, wrqu->data.length);
+-              kfree(priv->wpa_ie);
+-              priv->wpa_ie = buf;
+-              priv->wpa_ie_len = wrqu->data.length;
+-      } else {
+-              kfree(priv->wpa_ie);
+-              priv->wpa_ie = NULL;
+-              priv->wpa_ie_len = 0;
++      } else
++              buf = NULL;
++
++      if (orinoco_lock(priv, &flags) != 0) {
++              kfree(buf);
++              return -EBUSY;
+       }
++      kfree(priv->wpa_ie);
++      priv->wpa_ie = buf;
++      priv->wpa_ie_len = wrqu->data.length;
++
+       if (priv->wpa_ie) {
+               /* Looks like wl_lkm wants to check the auth alg, and
+                * somehow pass it to the firmware.
+@@ -4972,9 +4969,8 @@
+                */
+       }
+-out:
+       orinoco_unlock(priv, &flags);
+-      return err;
++      return 0;
+ }
+ static int orinoco_ioctl_get_genie(struct net_device *dev,
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/p54/p54common.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/p54/p54common.c
+@@ -741,17 +741,19 @@
+ int p54_read_eeprom(struct ieee80211_hw *dev)
+ {
+       struct p54_common *priv = dev->priv;
+-      struct p54_control_hdr *hdr = NULL;
++      struct p54_control_hdr *hdr = NULL, *org_hdr;
+       struct p54_eeprom_lm86 *eeprom_hdr;
+       size_t eeprom_size = 0x2020, offset = 0, blocksize;
+       int ret = -ENOMEM;
+       void *eeprom = NULL;
+-      hdr = (struct p54_control_hdr *)kzalloc(sizeof(*hdr) +
+-              sizeof(*eeprom_hdr) + EEPROM_READBACK_LEN, GFP_KERNEL);
+-      if (!hdr)
++      org_hdr = kzalloc(priv->tx_hdr_len + sizeof(*hdr) +
++                        sizeof(*eeprom_hdr) + EEPROM_READBACK_LEN,
++                        GFP_KERNEL);
++      if (!org_hdr)
+               goto free;
++      hdr = (void *) org_hdr + priv->tx_hdr_len;
+       priv->eeprom = kzalloc(EEPROM_READBACK_LEN, GFP_KERNEL);
+       if (!priv->eeprom)
+               goto free;
+@@ -790,7 +792,7 @@
+ free:
+       kfree(priv->eeprom);
+       priv->eeprom = NULL;
+-      kfree(hdr);
++      kfree(org_hdr);
+       kfree(eeprom);
+       return ret;
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/p54/p54usb.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/p54/p54usb.c
+@@ -54,6 +54,7 @@
+       {USB_DEVICE(0x050d, 0x7050)},   /* Belkin F5D7050 ver 1000 */
+       {USB_DEVICE(0x0572, 0x2000)},   /* Cohiba Proto board */
+       {USB_DEVICE(0x0572, 0x2002)},   /* Cohiba Proto board */
++      {USB_DEVICE(0x06b9, 0x0121)},   /* Thomson SpeedTouch 121g */
+       {USB_DEVICE(0x0707, 0xee13)},   /* SMC 2862W-G version 2 */
+       {USB_DEVICE(0x083a, 0x4521)},   /* Siemens Gigaset USB Adapter 54 version 2 */
+       {USB_DEVICE(0x0846, 0x4240)},   /* Netgear WG111 (v2) */
+@@ -84,13 +85,13 @@
+       struct ieee80211_hw *dev = info->dev;
+       struct p54u_priv *priv = dev->priv;
++      skb_unlink(skb, &priv->rx_queue);
++
+       if (unlikely(urb->status)) {
+-              info->urb = NULL;
+-              usb_free_urb(urb);
++              dev_kfree_skb_irq(skb);
+               return;
+       }
+-      skb_unlink(skb, &priv->rx_queue);
+       skb_put(skb, urb->actual_length);
+       if (priv->hw_type == P54U_NET2280)
+@@ -103,7 +104,6 @@
+       if (p54_rx(dev, skb)) {
+               skb = dev_alloc_skb(priv->common.rx_mtu + 32);
+               if (unlikely(!skb)) {
+-                      usb_free_urb(urb);
+                       /* TODO check rx queue length and refill *somewhere* */
+                       return;
+               }
+@@ -113,7 +113,6 @@
+               info->dev = dev;
+               urb->transfer_buffer = skb_tail_pointer(skb);
+               urb->context = skb;
+-              skb_queue_tail(&priv->rx_queue, skb);
+       } else {
+               if (priv->hw_type == P54U_NET2280)
+                       skb_push(skb, priv->common.tx_hdr_len);
+@@ -128,22 +127,23 @@
+                       WARN_ON(1);
+                       urb->transfer_buffer = skb_tail_pointer(skb);
+               }
+-
+-              skb_queue_tail(&priv->rx_queue, skb);
+       }
+-      usb_submit_urb(urb, GFP_ATOMIC);
++      usb_anchor_urb(urb, &priv->submitted);
++      if (usb_submit_urb(urb, GFP_ATOMIC)) {
++              usb_unanchor_urb(urb);
++              dev_kfree_skb_irq(skb);
++      } else
++              skb_queue_tail(&priv->rx_queue, skb);
+ }
+-static void p54u_tx_cb(struct urb *urb)
+-{
+-      usb_free_urb(urb);
+-}
++static void p54u_tx_cb(struct urb *urb) { }
+-static void p54u_tx_free_cb(struct urb *urb)
++static void p54u_free_urbs(struct ieee80211_hw *dev)
+ {
+-      kfree(urb->transfer_buffer);
+-      usb_free_urb(urb);
++      struct p54u_priv *priv = dev->priv;
++
++      usb_kill_anchored_urbs(&priv->submitted);
+ }
+ static int p54u_init_urbs(struct ieee80211_hw *dev)
+@@ -152,15 +152,18 @@
+       struct urb *entry;
+       struct sk_buff *skb;
+       struct p54u_rx_info *info;
++      int ret = 0;
+       while (skb_queue_len(&priv->rx_queue) < 32) {
+               skb = __dev_alloc_skb(priv->common.rx_mtu + 32, GFP_KERNEL);
+-              if (!skb)
+-                      break;
++              if (!skb) {
++                      ret = -ENOMEM;
++                      goto err;
++              }
+               entry = usb_alloc_urb(0, GFP_KERNEL);
+               if (!entry) {
+-                      kfree_skb(skb);
+-                      break;
++                      ret = -ENOMEM;
++                      goto err;
+               }
+               usb_fill_bulk_urb(entry, priv->udev,
+                                 usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA),
+@@ -170,26 +173,25 @@
+               info->urb = entry;
+               info->dev = dev;
+               skb_queue_tail(&priv->rx_queue, skb);
+-              usb_submit_urb(entry, GFP_KERNEL);
++
++              usb_anchor_urb(entry, &priv->submitted);
++              ret = usb_submit_urb(entry, GFP_KERNEL);
++              if (ret) {
++                      skb_unlink(skb, &priv->rx_queue);
++                      usb_unanchor_urb(entry);
++                      goto err;
++              }
++              usb_free_urb(entry);
++              entry = NULL;
+       }
+       return 0;
+-}
+-
+-static void p54u_free_urbs(struct ieee80211_hw *dev)
+-{
+-      struct p54u_priv *priv = dev->priv;
+-      struct p54u_rx_info *info;
+-      struct sk_buff *skb;
+-
+-      while ((skb = skb_dequeue(&priv->rx_queue))) {
+-              info = (struct p54u_rx_info *) skb->cb;
+-              if (!info->urb)
+-                      continue;
+-              usb_kill_urb(info->urb);
+-              kfree_skb(skb);
+-      }
++err:
++      usb_free_urb(entry);
++      kfree_skb(skb);
++      p54u_free_urbs(dev);
++      return ret;
+ }
+ static void p54u_tx_3887(struct ieee80211_hw *dev, struct p54_control_hdr *data,
+@@ -209,23 +211,38 @@
+       }
+       usb_fill_bulk_urb(addr_urb, priv->udev,
+-              usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), &data->req_id,
+-              sizeof(data->req_id), p54u_tx_cb, dev);
++                        usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
++                        &data->req_id, sizeof(data->req_id), p54u_tx_cb,
++                        dev);
+       usb_fill_bulk_urb(data_urb, priv->udev,
+-              usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), data, len,
+-              free_on_tx ? p54u_tx_free_cb : p54u_tx_cb, dev);
++                        usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
++                        data, len, p54u_tx_cb, dev);
++      addr_urb->transfer_flags |= URB_ZERO_PACKET;
++      data_urb->transfer_flags |= URB_ZERO_PACKET |
++                                  (free_on_tx ? URB_FREE_BUFFER : 0);
++
++      usb_anchor_urb(addr_urb, &priv->submitted);
++      if (usb_submit_urb(addr_urb, GFP_ATOMIC)) {
++              usb_unanchor_urb(addr_urb);
++              goto out;
++      }
+-      usb_submit_urb(addr_urb, GFP_ATOMIC);
+-      usb_submit_urb(data_urb, GFP_ATOMIC);
++      usb_anchor_urb(data_urb, &priv->submitted);
++      if (usb_submit_urb(data_urb, GFP_ATOMIC))
++              usb_unanchor_urb(data_urb);
++
++out:
++      usb_free_urb(addr_urb);
++      usb_free_urb(data_urb);
+ }
+-static __le32 p54u_lm87_chksum(const u32 *data, size_t length)
++static __le32 p54u_lm87_chksum(const __le32 *data, size_t length)
+ {
+       u32 chk = 0;
+       length >>= 2;
+       while (length--) {
+-              chk ^= *data++;
++              chk ^= le32_to_cpu(*data++);
+               chk = (chk >> 5) ^ (chk << 3);
+       }
+@@ -244,15 +261,20 @@
+       if (!data_urb)
+               return;
+-      hdr->chksum = p54u_lm87_chksum((u32 *)data, len);
++      hdr->chksum = p54u_lm87_chksum((__le32 *) data, len);
+       hdr->device_addr = data->req_id;
+       usb_fill_bulk_urb(data_urb, priv->udev,
+-              usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), hdr,
+-              len + sizeof(*hdr), free_on_tx ? p54u_tx_free_cb : p54u_tx_cb,
+-              dev);
++                        usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), hdr,
++                        len + sizeof(*hdr), p54u_tx_cb, dev);
++      data_urb->transfer_flags |= URB_ZERO_PACKET |
++                                  (free_on_tx ? URB_FREE_BUFFER : 0);
++
++      usb_anchor_urb(data_urb, &priv->submitted);
++      if (usb_submit_urb(data_urb, GFP_ATOMIC))
++              usb_unanchor_urb(data_urb);
+-      usb_submit_urb(data_urb, GFP_ATOMIC);
++      usb_free_urb(data_urb);
+ }
+ static void p54u_tx_net2280(struct ieee80211_hw *dev, struct p54_control_hdr *data,
+@@ -291,14 +313,30 @@
+       hdr->len = cpu_to_le16(len);
+       usb_fill_bulk_urb(int_urb, priv->udev,
+-              usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg),
+-              p54u_tx_free_cb, dev);
+-      usb_submit_urb(int_urb, GFP_ATOMIC);
++                        usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV),
++                        reg, sizeof(*reg), p54u_tx_cb, dev);
++      int_urb->transfer_flags |= URB_ZERO_PACKET | URB_FREE_BUFFER;
++      usb_anchor_urb(int_urb, &priv->submitted);
++      if (usb_submit_urb(int_urb, GFP_ATOMIC)) {
++              usb_unanchor_urb(int_urb);
++              goto out;
++      }
+       usb_fill_bulk_urb(data_urb, priv->udev,
+-              usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), hdr, len + sizeof(*hdr),
+-              free_on_tx ? p54u_tx_free_cb : p54u_tx_cb, dev);
+-      usb_submit_urb(data_urb, GFP_ATOMIC);
++                        usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), hdr,
++                        len + sizeof(*hdr), p54u_tx_cb, dev);
++      data_urb->transfer_flags |= URB_ZERO_PACKET |
++                                  (free_on_tx ? URB_FREE_BUFFER : 0);
++
++      usb_anchor_urb(int_urb, &priv->submitted);
++      if (usb_submit_urb(data_urb, GFP_ATOMIC)) {
++              usb_unanchor_urb(data_urb);
++              goto out;
++      }
++
++out:
++      usb_free_urb(int_urb);
++      usb_free_urb(data_urb);
+ }
+ static int p54u_write(struct p54u_priv *priv,
+@@ -799,6 +837,7 @@
+       SET_IEEE80211_DEV(dev, &intf->dev);
+       usb_set_intfdata(intf, dev);
+       priv->udev = udev;
++      init_usb_anchor(&priv->submitted);
+       usb_get_dev(udev);
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/p54/p54usb.h
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/p54/p54usb.h
+@@ -133,6 +133,7 @@
+       spinlock_t lock;
+       struct sk_buff_head rx_queue;
++      struct usb_anchor submitted;
+ };
+ #endif /* P54USB_H */
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/rt2x00/rt73usb.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/rt2x00/rt73usb.c
+@@ -2434,6 +2434,7 @@
+       /* Linksys */
+       { USB_DEVICE(0x13b1, 0x0020), USB_DEVICE_DATA(&rt73usb_ops) },
+       { USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) },
++      { USB_DEVICE(0x13b1, 0x0028), USB_DEVICE_DATA(&rt73usb_ops) },
+       /* MSI */
+       { USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) },
+       { USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) },
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/rtl8187_dev.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/rtl8187_dev.c
+@@ -40,6 +40,10 @@
+       {USB_DEVICE(0x0bda, 0x8189), .driver_info = DEVICE_RTL8187B},
+       {USB_DEVICE(0x0bda, 0x8197), .driver_info = DEVICE_RTL8187B},
+       {USB_DEVICE(0x0bda, 0x8198), .driver_info = DEVICE_RTL8187B},
++      /* Surecom */
++      {USB_DEVICE(0x0769, 0x11F2), .driver_info = DEVICE_RTL8187},
++      /* Logitech */
++      {USB_DEVICE(0x0789, 0x010C), .driver_info = DEVICE_RTL8187},
+       /* Netgear */
+       {USB_DEVICE(0x0846, 0x6100), .driver_info = DEVICE_RTL8187},
+       {USB_DEVICE(0x0846, 0x6a00), .driver_info = DEVICE_RTL8187},
+@@ -49,8 +53,16 @@
+       /* Sitecom */
+       {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
+       {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
++      /* Sphairon Access Systems GmbH */
++      {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187},
++      /* Dick Smith Electronics */
++      {USB_DEVICE(0x1371, 0x9401), .driver_info = DEVICE_RTL8187},
+       /* Abocom */
+       {USB_DEVICE(0x13d1, 0xabe6), .driver_info = DEVICE_RTL8187},
++      /* Qcom */
++      {USB_DEVICE(0x18E8, 0x6232), .driver_info = DEVICE_RTL8187},
++      /* AirLive */
++      {USB_DEVICE(0x1b75, 0x8187), .driver_info = DEVICE_RTL8187},
+       {}
+ };
+@@ -263,6 +275,7 @@
+       usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, ep),
+                         buf, skb->len, rtl8187_tx_cb, skb);
++      urb->transfer_flags |= URB_ZERO_PACKET;
+       rc = usb_submit_urb(urb, GFP_ATOMIC);
+       if (rc < 0) {
+               usb_free_urb(urb);
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/rtl8187_rtl8225.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/rtl8187_rtl8225.c
+@@ -287,7 +287,10 @@
+       ofdm_power = priv->channels[channel - 1].hw_value >> 4;
+       cck_power = min(cck_power, (u8)11);
+-      ofdm_power = min(ofdm_power, (u8)35);
++      if (ofdm_power > (u8)15)
++              ofdm_power = 25;
++      else
++              ofdm_power += 10;
+       rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
+                        rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1);
+@@ -540,7 +543,10 @@
+       cck_power += priv->txpwr_base & 0xF;
+       cck_power = min(cck_power, (u8)35);
+-      ofdm_power = min(ofdm_power, (u8)15);
++      if (ofdm_power > (u8)15)
++              ofdm_power = 25;
++      else
++              ofdm_power += 10;
+       ofdm_power += priv->txpwr_base >> 4;
+       ofdm_power = min(ofdm_power, (u8)35);
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/zd1211rw/zd_rf.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/zd1211rw/zd_rf.c
+@@ -86,6 +86,7 @@
+       case AL7230B_RF:
+               r = zd_rf_init_al7230b(rf);
+               break;
++      case MAXIM_NEW_RF:
+       case UW2453_RF:
+               r = zd_rf_init_uw2453(rf);
+               break;
+--- kernel-maemo-2.6.28.test.orig/drivers/net/wireless/zd1211rw/zd_usb.c
++++ kernel-maemo-2.6.28.test/drivers/net/wireless/zd1211rw/zd_usb.c
+@@ -37,6 +37,7 @@
+ static struct usb_device_id usb_ids[] = {
+       /* ZD1211 */
+       { USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 },
++      { USB_DEVICE(0x0ace, 0xa211), .driver_info = DEVICE_ZD1211 },
+       { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211 },
+       { USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 },
+       { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
+--- kernel-maemo-2.6.28.test.orig/drivers/parport/parport_serial.c
++++ kernel-maemo-2.6.28.test/drivers/parport/parport_serial.c
+@@ -64,6 +64,11 @@
+ static int __devinit netmos_parallel_init(struct pci_dev *dev, struct parport_pc_pci *card, int autoirq, int autodma)
+ {
++      /* the rule described below doesn't hold for this device */
++      if (dev->device == PCI_DEVICE_ID_NETMOS_9835 &&
++                      dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
++                      dev->subsystem_device == 0x0299)
++              return -ENODEV;
+       /*
+        * Netmos uses the subdevice ID to indicate the number of parallel
+        * and serial ports.  The form is 0x00PS, where <P> is the number of
+--- kernel-maemo-2.6.28.test.orig/drivers/pci/hotplug/pciehp_core.c
++++ kernel-maemo-2.6.28.test/drivers/pci/hotplug/pciehp_core.c
+@@ -126,8 +126,10 @@
+       mutex_lock(&slot->ctrl->crit_sect);
+       /* has it been >1 sec since our last toggle? */
+-      if ((get_seconds() - slot->last_emi_toggle) < 1)
++      if ((get_seconds() - slot->last_emi_toggle) < 1) {
++              mutex_unlock(&slot->ctrl->crit_sect);
+               return -EINVAL;
++      }
+       /* see what our current state is */
+       retval = get_lock_status(hotplug_slot, &value);
+--- kernel-maemo-2.6.28.test.orig/drivers/pci/intel-iommu.c
++++ kernel-maemo-2.6.28.test/drivers/pci/intel-iommu.c
+@@ -71,6 +71,8 @@
+ /* bitmap for indexing intel_iommus */
+ static int g_num_of_iommus;
++static int rwbf_quirk = 0;
++
+ static DEFINE_SPINLOCK(async_umap_flush_lock);
+ static LIST_HEAD(unmaps_to_do);
+@@ -506,7 +508,7 @@
+       u32 val;
+       unsigned long flag;
+-      if (!cap_rwbf(iommu->cap))
++      if (!rwbf_quirk && !cap_rwbf(iommu->cap))
+               return;
+       val = iommu->gcmd | DMA_GCMD_WBF;
+@@ -2436,3 +2438,13 @@
+       return pfn >> VTD_PAGE_SHIFT;
+ }
+ EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
++
++static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
++{
++      /* Mobile 4 Series Chipset neglects to set RWBF capability,
++         but needs it */
++      printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
++      rwbf_quirk = 1;
++}
++
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+--- kernel-maemo-2.6.28.test.orig/drivers/pci/msi.c
++++ kernel-maemo-2.6.28.test/drivers/pci/msi.c
+@@ -378,21 +378,19 @@
+       entry->msi_attrib.masked = 1;
+       entry->msi_attrib.default_irq = dev->irq;       /* Save IOAPIC IRQ */
+       entry->msi_attrib.pos = pos;
+-      if (entry->msi_attrib.maskbit) {
+-              entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
+-                              entry->msi_attrib.is_64);
+-      }
+       entry->dev = dev;
+       if (entry->msi_attrib.maskbit) {
+-              unsigned int maskbits, temp;
++              unsigned int base, maskbits, temp;
++
++              base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
++              entry->mask_base = (void __iomem *)(long)base;
++
+               /* All MSIs are unmasked by default, Mask them all */
+-              pci_read_config_dword(dev,
+-                      msi_mask_bits_reg(pos, entry->msi_attrib.is_64),
+-                      &maskbits);
++              pci_read_config_dword(dev, base, &maskbits);
+               temp = (1 << multi_msi_capable(control));
+               temp = ((temp - 1) & ~temp);
+               maskbits |= temp;
+-              pci_write_config_dword(dev, entry->msi_attrib.is_64, maskbits);
++              pci_write_config_dword(dev, base, maskbits);
+               entry->msi_attrib.maskbits_mask = temp;
+       }
+       list_add_tail(&entry->list, &dev->msi_list);
+--- kernel-maemo-2.6.28.test.orig/drivers/pci/pci-sysfs.c
++++ kernel-maemo-2.6.28.test/drivers/pci/pci-sysfs.c
+@@ -777,8 +777,8 @@
+               return -EINVAL;
+       
+       rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */
+-      if (!rom)
+-              return 0;
++      if (!rom || !size)
++              return -EIO;
+               
+       if (off >= size)
+               count = 0;
+--- kernel-maemo-2.6.28.test.orig/drivers/pci/pcie/aer/aerdrv_core.c
++++ kernel-maemo-2.6.28.test/drivers/pci/pcie/aer/aerdrv_core.c
+@@ -108,6 +108,34 @@
+ }
+ #endif  /*  0  */
++
++static void set_device_error_reporting(struct pci_dev *dev, void *data)
++{
++      bool enable = *((bool *)data);
++
++      if (dev->pcie_type != PCIE_RC_PORT &&
++          dev->pcie_type != PCIE_SW_UPSTREAM_PORT &&
++          dev->pcie_type != PCIE_SW_DOWNSTREAM_PORT)
++              return;
++
++      if (enable)
++              pci_enable_pcie_error_reporting(dev);
++      else
++              pci_disable_pcie_error_reporting(dev);
++}
++
++/**
++ * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
++ * @dev: pointer to root port's pci_dev data structure
++ * @enable: true = enable error reporting, false = disable error reporting.
++ */
++static void set_downstream_devices_error_reporting(struct pci_dev *dev,
++                                                 bool enable)
++{
++      set_device_error_reporting(dev, &enable);
++      pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
++}
++
+ static int find_device_iter(struct device *device, void *data)
+ {
+       struct pci_dev *dev;
+@@ -525,15 +553,11 @@
+       pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
+       pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
+-      /* Enable Root Port device reporting error itself */
+-      pci_read_config_word(pdev, pos+PCI_EXP_DEVCTL, &reg16);
+-      reg16 = reg16 |
+-              PCI_EXP_DEVCTL_CERE |
+-              PCI_EXP_DEVCTL_NFERE |
+-              PCI_EXP_DEVCTL_FERE |
+-              PCI_EXP_DEVCTL_URRE;
+-      pci_write_config_word(pdev, pos+PCI_EXP_DEVCTL,
+-              reg16);
++      /*
++       * Enable error reporting for the root port device and downstream port
++       * devices.
++       */
++      set_downstream_devices_error_reporting(pdev, true);
+       /* Enable Root Port's interrupt in response to error messages */
+       pci_write_config_dword(pdev,
+@@ -553,6 +577,12 @@
+       u32 reg32;
+       int pos;
++      /*
++       * Disable error reporting for the root port device and downstream port
++       * devices.
++       */
++      set_downstream_devices_error_reporting(pdev, false);
++
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+       /* Disable Root's interrupt in response to error messages */
+       pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0);
+--- kernel-maemo-2.6.28.test.orig/drivers/pci/pcie/aspm.c
++++ kernel-maemo-2.6.28.test/drivers/pci/pcie/aspm.c
+@@ -33,6 +33,11 @@
+ struct pcie_link_state {
+       struct list_head sibiling;
+       struct pci_dev *pdev;
++      bool downstream_has_switch;
++
++      struct pcie_link_state *parent;
++      struct list_head children;
++      struct list_head link;
+       /* ASPM state */
+       unsigned int support_state;
+@@ -125,7 +130,7 @@
+       link_state->clk_pm_enabled = !!enable;
+ }
+-static void pcie_check_clock_pm(struct pci_dev *pdev)
++static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist)
+ {
+       int pos;
+       u32 reg32;
+@@ -149,10 +154,26 @@
+               if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
+                       enabled = 0;
+       }
+-      link_state->clk_pm_capable = capable;
+       link_state->clk_pm_enabled = enabled;
+       link_state->bios_clk_state = enabled;
+-      pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
++      if (!blacklist) {
++              link_state->clk_pm_capable = capable;
++              pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
++      } else {
++              link_state->clk_pm_capable = 0;
++              pcie_set_clock_pm(pdev, 0);
++      }
++}
++
++static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
++{
++      struct pci_dev *child_dev;
++
++      list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
++              if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM)
++                      return true;
++      }
++      return false;
+ }
+ /*
+@@ -419,9 +440,9 @@
+ {
+       struct pci_dev *child_dev;
+-      /* If no child, disable the link */
++      /* If no child, ignore the link */
+       if (list_empty(&pdev->subordinate->devices))
+-              return 0;
++              return state;
+       list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
+               if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
+                       /*
+@@ -462,6 +483,9 @@
+       int valid = 1;
+       struct pcie_link_state *link_state = pdev->link_state;
++      /* If no child, disable the link */
++      if (list_empty(&pdev->subordinate->devices))
++              state = 0;
+       /*
+        * if the downstream component has pci bridge function, don't do ASPM
+        * now
+@@ -493,20 +517,52 @@
+       link_state->enabled_state = state;
+ }
++static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link)
++{
++      struct pcie_link_state *root_port_link = link;
++      while (root_port_link->parent)
++              root_port_link = root_port_link->parent;
++      return root_port_link;
++}
++
++/* check the whole hierarchy, and configure each link in the hierarchy */
+ static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
+       unsigned int state)
+ {
+       struct pcie_link_state *link_state = pdev->link_state;
++      struct pcie_link_state *root_port_link = get_root_port_link(link_state);
++      struct pcie_link_state *leaf;
+-      if (link_state->support_state == 0)
+-              return;
+       state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
+-      /* state 0 means disabling aspm */
+-      state = pcie_aspm_check_state(pdev, state);
++      /* check all leaf links under this specific root port link */
++      list_for_each_entry(leaf, &link_list, sibiling) {
++              if (!list_empty(&leaf->children) ||
++                      get_root_port_link(leaf) != root_port_link)
++                      continue;
++              state = pcie_aspm_check_state(leaf->pdev, state);
++      }
++      /* check the root port link too, in case it has no children */
++      state = pcie_aspm_check_state(root_port_link->pdev, state);
++
+       if (link_state->enabled_state == state)
+               return;
+-      __pcie_aspm_config_link(pdev, state);
++
++      /*
++       * We must reconfigure the whole hierarchy. See the comments in
++       * __pcie_aspm_config_link for the required ordering.
++       */
++      if (state & PCIE_LINK_STATE_L1) {
++              list_for_each_entry(leaf, &link_list, sibiling) {
++                      if (get_root_port_link(leaf) == root_port_link)
++                              __pcie_aspm_config_link(leaf->pdev, state);
++              }
++      } else {
++              list_for_each_entry_reverse(leaf, &link_list, sibiling) {
++                      if (get_root_port_link(leaf) == root_port_link)
++                              __pcie_aspm_config_link(leaf->pdev, state);
++              }
++      }
+ }
+ /*
+@@ -570,6 +626,7 @@
+       unsigned int state;
+       struct pcie_link_state *link_state;
+       int error = 0;
++      int blacklist;
+       if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
+               return;
+@@ -580,29 +637,58 @@
+       if (list_empty(&pdev->subordinate->devices))
+               goto out;
+-      if (pcie_aspm_sanity_check(pdev))
+-              goto out;
++      blacklist = !!pcie_aspm_sanity_check(pdev);
+       mutex_lock(&aspm_lock);
+       link_state = kzalloc(sizeof(*link_state), GFP_KERNEL);
+       if (!link_state)
+               goto unlock_out;
+-      pdev->link_state = link_state;
+-      pcie_aspm_configure_common_clock(pdev);
+-
+-      pcie_aspm_cap_init(pdev);
++      link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev);
++      INIT_LIST_HEAD(&link_state->children);
++      INIT_LIST_HEAD(&link_state->link);
++      if (pdev->bus->self) {/* this is a switch */
++              struct pcie_link_state *parent_link_state;
++
++              parent_link_state = pdev->bus->parent->self->link_state;
++              if (!parent_link_state) {
++                      kfree(link_state);
++                      goto unlock_out;
++              }
++              list_add(&link_state->link, &parent_link_state->children);
++              link_state->parent = parent_link_state;
++      }
+-      /* config link state to avoid BIOS error */
+-      state = pcie_aspm_check_state(pdev, policy_to_aspm_state(pdev));
+-      __pcie_aspm_config_link(pdev, state);
++      pdev->link_state = link_state;
+-      pcie_check_clock_pm(pdev);
++      if (!blacklist) {
++              pcie_aspm_configure_common_clock(pdev);
++              pcie_aspm_cap_init(pdev);
++      } else {
++              link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
++              link_state->bios_aspm_state = 0;
++              /* Set support state to 0, so we will disable ASPM later */
++              link_state->support_state = 0;
++      }
+       link_state->pdev = pdev;
+       list_add(&link_state->sibiling, &link_list);
++      if (link_state->downstream_has_switch) {
++              /*
++               * If the link has a switch, delay the link config. The leaf link
++               * initialization will configure the whole hierarchy, but we must
++               * make sure the BIOS doesn't set an unsupported link state.
++               */
++              state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state);
++              __pcie_aspm_config_link(pdev, state);
++      } else
++              __pcie_aspm_configure_link_state(pdev,
++                      policy_to_aspm_state(pdev));
++
++      pcie_check_clock_pm(pdev, blacklist);
++
+ unlock_out:
+       if (error)
+               free_link_state(pdev);
+@@ -627,14 +713,15 @@
+       /*
+        * All PCIe functions are in one slot, remove one function will remove
+-       * the the whole slot, so just wait
++       * the whole slot, so just wait until we are the last function left.
+        */
+-      if (!list_empty(&parent->subordinate->devices))
++      if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
+               goto out;
+       /* All functions are removed, so just disable ASPM for the link */
+       __pcie_aspm_config_one_dev(parent, 0);
+       list_del(&link_state->sibiling);
++      list_del(&link_state->link);
+       /* Clock PM is for endpoint device */
+       free_link_state(parent);
+--- kernel-maemo-2.6.28.test.orig/drivers/pci/pcie/portdrv_pci.c
++++ kernel-maemo-2.6.28.test/drivers/pci/pcie/portdrv_pci.c
+@@ -101,14 +101,13 @@
+       pcie_portdrv_save_config(dev);
+-      pci_enable_pcie_error_reporting(dev);
+-
+       return 0;
+ }
+ static void pcie_portdrv_remove (struct pci_dev *dev)
+ {
+       pcie_port_device_remove(dev);
++      pci_disable_device(dev);
+       kfree(pci_get_drvdata(dev));
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/pci/quirks.c
++++ kernel-maemo-2.6.28.test/drivers/pci/quirks.c
+@@ -23,6 +23,7 @@
+ #include <linux/acpi.h>
+ #include <linux/kallsyms.h>
+ #include <linux/dmi.h>
++#include <linux/pci-aspm.h>
+ #include "pci.h"
+ int isa_dma_bridge_buggy;
+@@ -1543,6 +1544,30 @@
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt);
++/*
++ * The 82575 and 82598 may experience data corruption issues when transitioning
++ * out of L0s.  To prevent this we need to disable L0s on the PCIe link.
++ */
++static void __devinit quirk_disable_aspm_l0s(struct pci_dev *dev)
++{
++      dev_info(&dev->dev, "Disabling L0s\n");
++      pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
++
+ static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
+ {
+       /* rev 1 ncr53c810 chips don't set the class at all which means
+@@ -1778,7 +1803,6 @@
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
+                       quirk_msi_ht_cap);
+-
+ /* The nVidia CK804 chipset may have 2 HT MSI mappings.
+  * MSI are supported if the MSI capability set in any of these mappings.
+  */
+@@ -1829,6 +1853,9 @@
+                        PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
+                        ht_enable_msi_mapping);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
++                       ht_enable_msi_mapping);
++
+ /* The P5N32-SLI Premium motherboard from Asus has a problem with msi
+  * for the MCP55 NIC. It is not yet determined whether the msi problem
+  * also affects other devices. As for now, turn off msi for this device.
+--- kernel-maemo-2.6.28.test.orig/drivers/pci/rom.c
++++ kernel-maemo-2.6.28.test/drivers/pci/rom.c
+@@ -63,7 +63,7 @@
+  * The PCI window size could be much larger than the
+  * actual image size.
+  */
+-size_t pci_get_rom_size(void __iomem *rom, size_t size)
++size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
+ {
+       void __iomem *image;
+       int last_image;
+@@ -72,8 +72,10 @@
+       do {
+               void __iomem *pds;
+               /* Standard PCI ROMs start out with these bytes 55 AA */
+-              if (readb(image) != 0x55)
++              if (readb(image) != 0x55) {
++                      dev_err(&pdev->dev, "Invalid ROM contents\n");
+                       break;
++              }
+               if (readb(image + 1) != 0xAA)
+                       break;
+               /* get the PCI data structure and check its signature */
+@@ -159,7 +161,7 @@
+        * size is much larger than the actual size of the ROM.
+        * True size is important if the ROM is going to be copied.
+        */
+-      *size = pci_get_rom_size(rom, *size);
++      *size = pci_get_rom_size(pdev, rom, *size);
+       return rom;
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/pci/syscall.c
++++ kernel-maemo-2.6.28.test/drivers/pci/syscall.c
+@@ -14,10 +14,8 @@
+ #include <asm/uaccess.h>
+ #include "pci.h"
+-asmlinkage long
+-sys_pciconfig_read(unsigned long bus, unsigned long dfn,
+-                 unsigned long off, unsigned long len,
+-                 void __user *buf)
++SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
++              unsigned long, off, unsigned long, len, void __user *, buf)
+ {
+       struct pci_dev *dev;
+       u8 byte;
+@@ -86,10 +84,8 @@
+       return err;
+ }
+-asmlinkage long
+-sys_pciconfig_write(unsigned long bus, unsigned long dfn,
+-                  unsigned long off, unsigned long len,
+-                  void __user *buf)
++SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
++              unsigned long, off, unsigned long, len, void __user *, buf)
+ {
+       struct pci_dev *dev;
+       u8 byte;
+--- kernel-maemo-2.6.28.test.orig/drivers/scsi/eata.c
++++ kernel-maemo-2.6.28.test/drivers/scsi/eata.c
+@@ -1626,8 +1626,15 @@
+       cpp->sense_len = SCSI_SENSE_BUFFERSIZE;
+-      count = scsi_dma_map(SCpnt);
+-      BUG_ON(count < 0);
++      if (!scsi_sg_count(SCpnt)) {
++              cpp->data_len = 0;
++              return;
++      }
++
++      count = pci_map_sg(ha->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
++                         pci_dir);
++      BUG_ON(!count);
++
+       scsi_for_each_sg(SCpnt, sg, count, k) {
+               cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
+               cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
+@@ -1655,7 +1662,9 @@
+               pci_unmap_single(ha->pdev, DEV2H(cpp->sense_addr),
+                                DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
+-      scsi_dma_unmap(SCpnt);
++      if (scsi_sg_count(SCpnt))
++              pci_unmap_sg(ha->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
++                           pci_dir);
+       if (!DEV2H(cpp->data_len))
+               pci_dir = PCI_DMA_BIDIRECTIONAL;
+--- kernel-maemo-2.6.28.test.orig/drivers/scsi/hptiop.c
++++ kernel-maemo-2.6.28.test/drivers/scsi/hptiop.c
+@@ -1251,6 +1251,7 @@
+       { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
++      { PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
+       { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
+--- kernel-maemo-2.6.28.test.orig/drivers/scsi/ibmvscsi/ibmvfc.c
++++ kernel-maemo-2.6.28.test/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -566,7 +566,7 @@
+       struct ibmvfc_target *tgt;
+       if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
+-              if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
++              if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
+                       dev_err(vhost->dev,
+                               "Host initialization retries exceeded. Taking adapter offline\n");
+                       ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+@@ -847,11 +847,12 @@
+ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
+ {
+       if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
+-              if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
++              vhost->delay_init = 1;
++              if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
+                       dev_err(vhost->dev,
+                               "Host initialization retries exceeded. Taking adapter offline\n");
+                       ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+-              } else if (vhost->init_retries == IBMVFC_MAX_INIT_RETRIES)
++              } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
+                       __ibmvfc_reset_host(vhost);
+               else
+                       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+@@ -2089,15 +2090,17 @@
+       case IBMVFC_AE_LINK_UP:
+       case IBMVFC_AE_RESUME:
+               vhost->events_to_log |= IBMVFC_AE_LINKUP;
+-              ibmvfc_init_host(vhost, 1);
++              vhost->delay_init = 1;
++              __ibmvfc_reset_host(vhost);
+               break;
+       case IBMVFC_AE_SCN_FABRIC:
++      case IBMVFC_AE_SCN_DOMAIN:
+               vhost->events_to_log |= IBMVFC_AE_RSCN;
+-              ibmvfc_init_host(vhost, 1);
++              vhost->delay_init = 1;
++              __ibmvfc_reset_host(vhost);
+               break;
+       case IBMVFC_AE_SCN_NPORT:
+       case IBMVFC_AE_SCN_GROUP:
+-      case IBMVFC_AE_SCN_DOMAIN:
+               vhost->events_to_log |= IBMVFC_AE_RSCN;
+       case IBMVFC_AE_ELS_LOGO:
+       case IBMVFC_AE_ELS_PRLO:
+@@ -2669,7 +2672,7 @@
+ static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
+                                 void (*job_step) (struct ibmvfc_target *))
+ {
+-      if (++tgt->init_retries > IBMVFC_MAX_INIT_RETRIES) {
++      if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+               wake_up(&tgt->vhost->work_wait_q);
+       } else
+@@ -3519,7 +3522,13 @@
+               break;
+       case IBMVFC_HOST_ACTION_INIT:
+               BUG_ON(vhost->state != IBMVFC_INITIALIZING);
+-              vhost->job_step(vhost);
++              if (vhost->delay_init) {
++                      vhost->delay_init = 0;
++                      spin_unlock_irqrestore(vhost->host->host_lock, flags);
++                      ssleep(15);
++                      return;
++              } else
++                      vhost->job_step(vhost);
+               break;
+       case IBMVFC_HOST_ACTION_QUERY:
+               list_for_each_entry(tgt, &vhost->targets, queue)
+--- kernel-maemo-2.6.28.test.orig/drivers/scsi/ibmvscsi/ibmvfc.h
++++ kernel-maemo-2.6.28.test/drivers/scsi/ibmvscsi/ibmvfc.h
+@@ -33,7 +33,7 @@
+ #define IBMVFC_DRIVER_DATE            "(August 14, 2008)"
+ #define IBMVFC_DEFAULT_TIMEOUT        15
+-#define IBMVFC_INIT_TIMEOUT           30
++#define IBMVFC_INIT_TIMEOUT           120
+ #define IBMVFC_MAX_REQUESTS_DEFAULT   100
+ #define IBMVFC_DEBUG                  0
+@@ -43,7 +43,8 @@
+ #define IBMVFC_MAX_DISC_THREADS       4
+ #define IBMVFC_TGT_MEMPOOL_SZ         64
+ #define IBMVFC_MAX_CMDS_PER_LUN       64
+-#define IBMVFC_MAX_INIT_RETRIES       3
++#define IBMVFC_MAX_HOST_INIT_RETRIES  6
++#define IBMVFC_MAX_TGT_INIT_RETRIES           3
+ #define IBMVFC_DEV_LOSS_TMO           (5 * 60)
+ #define IBMVFC_DEFAULT_LOG_LEVEL      2
+ #define IBMVFC_MAX_CDB_LEN            16
+@@ -671,6 +672,7 @@
+       int discovery_threads;
+       int client_migrated;
+       int reinit;
++      int delay_init;
+       int events_to_log;
+ #define IBMVFC_AE_LINKUP      0x0001
+ #define IBMVFC_AE_LINKDOWN    0x0002
+--- kernel-maemo-2.6.28.test.orig/drivers/scsi/libiscsi.c
++++ kernel-maemo-2.6.28.test/drivers/scsi/libiscsi.c
+@@ -1862,12 +1862,14 @@
+               num_arrays++;
+       q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
+       if (q->pool == NULL)
+-              goto enomem;
++              return -ENOMEM;
+       q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
+                             GFP_KERNEL, NULL);
+-      if (q->queue == ERR_PTR(-ENOMEM))
++      if (IS_ERR(q->queue)) {
++              q->queue = NULL;
+               goto enomem;
++      }
+       for (i = 0; i < max; i++) {
+               q->pool[i] = kzalloc(item_size, GFP_KERNEL);
+@@ -1897,8 +1899,8 @@
+       for (i = 0; i < q->max; i++)
+               kfree(q->pool[i]);
+-      if (q->pool)
+-              kfree(q->pool);
++      kfree(q->pool);
++      kfree(q->queue);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_free);
+--- kernel-maemo-2.6.28.test.orig/drivers/scsi/mvsas.c
++++ kernel-maemo-2.6.28.test/drivers/scsi/mvsas.c
+@@ -2959,7 +2959,7 @@
+       /* enable auto port detection */
+       mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
+-      msleep(100);
++      msleep(1100);
+       /* init and reset phys */
+       for (i = 0; i < mvi->chip->n_phy; i++) {
+               u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
+--- kernel-maemo-2.6.28.test.orig/drivers/scsi/pcmcia/aha152x_stub.c
++++ kernel-maemo-2.6.28.test/drivers/scsi/pcmcia/aha152x_stub.c
+@@ -114,7 +114,7 @@
+     link->io.NumPorts1 = 0x20;
+     link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+     link->io.IOAddrLines = 10;
+-    link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
++    link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
+     link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+     link->conf.Attributes = CONF_ENABLE_IRQ;
+     link->conf.IntType = INT_MEMORY_AND_IO;
+--- kernel-maemo-2.6.28.test.orig/drivers/scsi/sd.c
++++ kernel-maemo-2.6.28.test/drivers/scsi/sd.c
+@@ -106,6 +106,7 @@
+ static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
+ static void sd_print_result(struct scsi_disk *, int);
++static DEFINE_SPINLOCK(sd_index_lock);
+ static DEFINE_IDA(sd_index_ida);
+ /* This semaphore is used to mediate the 0->1 reference get in the
+@@ -1847,7 +1848,9 @@
+               if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
+                       goto out_put;
++              spin_lock(&sd_index_lock);
+               error = ida_get_new(&sd_index_ida, &index);
++              spin_unlock(&sd_index_lock);
+       } while (error == -EAGAIN);
+       if (error)
+@@ -1910,7 +1913,9 @@
+       return 0;
+  out_free_index:
++      spin_lock(&sd_index_lock);
+       ida_remove(&sd_index_ida, index);
++      spin_unlock(&sd_index_lock);
+  out_put:
+       put_disk(gd);
+  out_free:
+@@ -1960,7 +1965,9 @@
+       struct scsi_disk *sdkp = to_scsi_disk(dev);
+       struct gendisk *disk = sdkp->disk;
+       
++      spin_lock(&sd_index_lock);
+       ida_remove(&sd_index_ida, sdkp->index);
++      spin_unlock(&sd_index_lock);
+       disk->private_data = NULL;
+       put_disk(disk);
+--- kernel-maemo-2.6.28.test.orig/drivers/scsi/sg.c
++++ kernel-maemo-2.6.28.test/drivers/scsi/sg.c
+@@ -101,6 +101,7 @@
+ #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
+ static int sg_add(struct device *, struct class_interface *);
++static void sg_device_destroy(struct kref *kref);
+ static void sg_remove(struct device *, struct class_interface *);
+ static DEFINE_IDR(sg_index_idr);
+@@ -137,6 +138,7 @@
+       volatile char done;     /* 0->before bh, 1->before read, 2->read */
+       struct request *rq;
+       struct bio *bio;
++      struct execute_work ew;
+ } Sg_request;
+ typedef struct sg_fd {                /* holds the state of a file descriptor */
+@@ -158,6 +160,8 @@
+       char next_cmd_len;      /* 0 -> automatic (def), >0 -> use on next write() */
+       char keep_orphan;       /* 0 -> drop orphan (def), 1 -> keep for read() */
+       char mmap_called;       /* 0 -> mmap() never called on this fd */
++      struct kref f_ref;
++      struct execute_work ew;
+ } Sg_fd;
+ typedef struct sg_device { /* holds the state of each scsi generic device */
+@@ -171,6 +175,7 @@
+       char sgdebug;           /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
+       struct gendisk *disk;
+       struct cdev * cdev;     /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
++      struct kref d_ref;
+ } Sg_device;
+ static int sg_fasync(int fd, struct file *filp, int mode);
+@@ -185,7 +190,7 @@
+                          Sg_request * srp);
+ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
+                       const char __user *buf, size_t count, int blocking,
+-                      int read_only, Sg_request **o_srp);
++                      int read_only, int sg_io_owned, Sg_request **o_srp);
+ static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
+                          unsigned char *cmnd, int timeout, int blocking);
+ static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
+@@ -194,13 +199,14 @@
+ static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
+ static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
+ static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
+-static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
+-static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
++static void sg_remove_sfp(struct kref *);
+ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+ static Sg_request *sg_add_request(Sg_fd * sfp);
+ static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
+ static int sg_res_in_use(Sg_fd * sfp);
++static Sg_device *sg_lookup_dev(int dev);
+ static Sg_device *sg_get_dev(int dev);
++static void sg_put_dev(Sg_device *sdp);
+ #ifdef CONFIG_SCSI_PROC_FS
+ static int sg_last_dev(void);
+ #endif
+@@ -237,22 +243,17 @@
+       nonseekable_open(inode, filp);
+       SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
+       sdp = sg_get_dev(dev);
+-      if ((!sdp) || (!sdp->device)) {
+-              unlock_kernel();
+-              return -ENXIO;
+-      }
+-      if (sdp->detached) {
+-              unlock_kernel();
+-              return -ENODEV;
++      if (IS_ERR(sdp)) {
++              retval = PTR_ERR(sdp);
++              sdp = NULL;
++              goto sg_put;
+       }
+       /* This driver's module count bumped by fops_get in <linux/fs.h> */
+       /* Prevent the device driver from vanishing while we sleep */
+       retval = scsi_device_get(sdp->device);
+-      if (retval) {
+-              unlock_kernel();
+-              return retval;
+-      }
++      if (retval)
++              goto sg_put;
+       if (!((flags & O_NONBLOCK) ||
+             scsi_block_when_processing_errors(sdp->device))) {
+@@ -303,16 +304,20 @@
+       if ((sfp = sg_add_sfp(sdp, dev)))
+               filp->private_data = sfp;
+       else {
+-              if (flags & O_EXCL)
++              if (flags & O_EXCL) {
+                       sdp->exclude = 0;       /* undo if error */
++                      wake_up_interruptible(&sdp->o_excl_wait);
++              }
+               retval = -ENOMEM;
+               goto error_out;
+       }
+-      unlock_kernel();
+-      return 0;
+-
+-      error_out:
+-      scsi_device_put(sdp->device);
++      retval = 0;
++error_out:
++      if (retval)
++              scsi_device_put(sdp->device);
++sg_put:
++      if (sdp)
++              sg_put_dev(sdp);
+       unlock_kernel();
+       return retval;
+ }
+@@ -327,13 +332,13 @@
+       if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+               return -ENXIO;
+       SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
+-      if (0 == sg_remove_sfp(sdp, sfp)) {     /* Returns 1 when sdp gone */
+-              if (!sdp->detached) {
+-                      scsi_device_put(sdp->device);
+-              }
+-              sdp->exclude = 0;
+-              wake_up_interruptible(&sdp->o_excl_wait);
+-      }
++
++      sfp->closed = 1;
++
++      sdp->exclude = 0;
++      wake_up_interruptible(&sdp->o_excl_wait);
++
++      kref_put(&sfp->f_ref, sg_remove_sfp);
+       return 0;
+ }
+@@ -557,7 +562,8 @@
+               return -EFAULT;
+       blocking = !(filp->f_flags & O_NONBLOCK);
+       if (old_hdr.reply_len < 0)
+-              return sg_new_write(sfp, filp, buf, count, blocking, 0, NULL);
++              return sg_new_write(sfp, filp, buf, count,
++                                  blocking, 0, 0, NULL);
+       if (count < (SZ_SG_HEADER + 6))
+               return -EIO;    /* The minimum scsi command length is 6 bytes. */
+@@ -638,7 +644,7 @@
+ static ssize_t
+ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
+-               size_t count, int blocking, int read_only,
++               size_t count, int blocking, int read_only, int sg_io_owned,
+                Sg_request **o_srp)
+ {
+       int k;
+@@ -658,6 +664,7 @@
+               SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
+               return -EDOM;
+       }
++      srp->sg_io_owned = sg_io_owned;
+       hp = &srp->header;
+       if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
+               sg_remove_request(sfp, srp);
+@@ -755,24 +762,13 @@
+       hp->duration = jiffies_to_msecs(jiffies);
+       srp->rq->timeout = timeout;
++      kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
+       blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
+                             srp->rq, 1, sg_rq_end_io);
+       return 0;
+ }
+ static int
+-sg_srp_done(Sg_request *srp, Sg_fd *sfp)
+-{
+-      unsigned long iflags;
+-      int done;
+-
+-      read_lock_irqsave(&sfp->rq_list_lock, iflags);
+-      done = srp->done;
+-      read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+-      return done;
+-}
+-
+-static int
+ sg_ioctl(struct inode *inode, struct file *filp,
+        unsigned int cmd_in, unsigned long arg)
+ {
+@@ -804,27 +800,26 @@
+                               return -EFAULT;
+                       result =
+                           sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
+-                                       blocking, read_only, &srp);
++                                       blocking, read_only, 1, &srp);
+                       if (result < 0)
+                               return result;
+-                      srp->sg_io_owned = 1;
+                       while (1) {
+                               result = 0;     /* following macro to beat race condition */
+                               __wait_event_interruptible(sfp->read_wait,
+-                                      (sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
+-                                                         result);
++                                      (srp->done || sdp->detached),
++                                      result);
+                               if (sdp->detached)
+                                       return -ENODEV;
+-                              if (sfp->closed)
+-                                      return 0;       /* request packet dropped already */
+-                              if (0 == result)
++                              write_lock_irq(&sfp->rq_list_lock);
++                              if (srp->done) {
++                                      srp->done = 2;
++                                      write_unlock_irq(&sfp->rq_list_lock);
+                                       break;
++                              }
+                               srp->orphan = 1;
++                              write_unlock_irq(&sfp->rq_list_lock);
+                               return result;  /* -ERESTARTSYS because signal hit process */
+                       }
+-                      write_lock_irqsave(&sfp->rq_list_lock, iflags);
+-                      srp->done = 2;
+-                      write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+                       result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
+                       return (result < 0) ? result : 0;
+               }
+@@ -1240,6 +1235,15 @@
+       return 0;
+ }
++static void sg_rq_end_io_usercontext(struct work_struct *work)
++{
++      struct sg_request *srp = container_of(work, struct sg_request, ew.work);
++      struct sg_fd *sfp = srp->parentfp;
++
++      sg_finish_rem_req(srp);
++      kref_put(&sfp->f_ref, sg_remove_sfp);
++}
++
+ /*
+  * This function is a "bottom half" handler that is called by the mid
+  * level when a command is completed (or has failed).
+@@ -1247,24 +1251,23 @@
+ static void sg_rq_end_io(struct request *rq, int uptodate)
+ {
+       struct sg_request *srp = rq->end_io_data;
+-      Sg_device *sdp = NULL;
++      Sg_device *sdp;
+       Sg_fd *sfp;
+       unsigned long iflags;
+       unsigned int ms;
+       char *sense;
+-      int result, resid;
++      int result, resid, done = 1;
+-      if (NULL == srp) {
+-              printk(KERN_ERR "sg_cmd_done: NULL request\n");
++      if (WARN_ON(srp->done != 0))
+               return;
+-      }
++
+       sfp = srp->parentfp;
+-      if (sfp)
+-              sdp = sfp->parentdp;
+-      if ((NULL == sdp) || sdp->detached) {
+-              printk(KERN_INFO "sg_cmd_done: device detached\n");
++      if (WARN_ON(sfp == NULL))
+               return;
+-      }
++
++      sdp = sfp->parentdp;
++      if (unlikely(sdp->detached))
++              printk(KERN_INFO "sg_rq_end_io: device detached\n");
+       sense = rq->sense;
+       result = rq->errors;
+@@ -1303,33 +1306,25 @@
+       }
+       /* Rely on write phase to clean out srp status values, so no "else" */
+-      if (sfp->closed) {      /* whoops this fd already released, cleanup */
+-              SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
+-              sg_finish_rem_req(srp);
+-              srp = NULL;
+-              if (NULL == sfp->headrp) {
+-                      SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, final cleanup\n"));
+-                      if (0 == sg_remove_sfp(sdp, sfp)) {     /* device still present */
+-                              scsi_device_put(sdp->device);
+-                      }
+-                      sfp = NULL;
+-              }
+-      } else if (srp && srp->orphan) {
++      write_lock_irqsave(&sfp->rq_list_lock, iflags);
++      if (unlikely(srp->orphan)) {
+               if (sfp->keep_orphan)
+                       srp->sg_io_owned = 0;
+-              else {
+-                      sg_finish_rem_req(srp);
+-                      srp = NULL;
+-              }
++              else
++                      done = 0;
+       }
+-      if (sfp && srp) {
+-              /* Now wake up any sg_read() that is waiting for this packet. */
+-              kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
+-              write_lock_irqsave(&sfp->rq_list_lock, iflags);
+-              srp->done = 1;
++      srp->done = done;
++      write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
++
++      if (likely(done)) {
++              /* Now wake up any sg_read() that is waiting for this
++               * packet.
++               */
+               wake_up_interruptible(&sfp->read_wait);
+-              write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+-      }
++              kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
++              kref_put(&sfp->f_ref, sg_remove_sfp);
++      } else
++              execute_in_process_context(sg_rq_end_io_usercontext, &srp->ew);
+ }
+ static struct file_operations sg_fops = {
+@@ -1364,17 +1359,18 @@
+               printk(KERN_WARNING "kmalloc Sg_device failure\n");
+               return ERR_PTR(-ENOMEM);
+       }
+-      error = -ENOMEM;
++
+       if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
+               printk(KERN_WARNING "idr expansion Sg_device failure\n");
++              error = -ENOMEM;
+               goto out;
+       }
+       write_lock_irqsave(&sg_index_lock, iflags);
+-      error = idr_get_new(&sg_index_idr, sdp, &k);
+-      write_unlock_irqrestore(&sg_index_lock, iflags);
++      error = idr_get_new(&sg_index_idr, sdp, &k);
+       if (error) {
++              write_unlock_irqrestore(&sg_index_lock, iflags);
+               printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
+                      error);
+               goto out;
+@@ -1391,6 +1387,9 @@
+       init_waitqueue_head(&sdp->o_excl_wait);
+       sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
+       sdp->index = k;
++      kref_init(&sdp->d_ref);
++
++      write_unlock_irqrestore(&sg_index_lock, iflags);
+       error = 0;
+  out:
+@@ -1401,6 +1400,8 @@
+       return sdp;
+  overflow:
++      idr_remove(&sg_index_idr, k);
++      write_unlock_irqrestore(&sg_index_lock, iflags);
+       sdev_printk(KERN_WARNING, scsidp,
+                   "Unable to attach sg device type=%d, minor "
+                   "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
+@@ -1488,49 +1489,46 @@
+       return error;
+ }
+-static void
+-sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
++static void sg_device_destroy(struct kref *kref)
++{
++      struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
++      unsigned long flags;
++
++      /* CAUTION!  Note that the device can still be found via idr_find()
++       * even though the refcount is 0.  Therefore, do idr_remove() BEFORE
++       * any other cleanup.
++       */
++
++      write_lock_irqsave(&sg_index_lock, flags);
++      idr_remove(&sg_index_idr, sdp->index);
++      write_unlock_irqrestore(&sg_index_lock, flags);
++
++      SCSI_LOG_TIMEOUT(3,
++              printk("sg_device_destroy: %s\n",
++                      sdp->disk->disk_name));
++
++      put_disk(sdp->disk);
++      kfree(sdp);
++}
++
++static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
+ {
+       struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
+       Sg_device *sdp = dev_get_drvdata(cl_dev);
+       unsigned long iflags;
+       Sg_fd *sfp;
+-      Sg_fd *tsfp;
+-      Sg_request *srp;
+-      Sg_request *tsrp;
+-      int delay;
+-      if (!sdp)
++      if (!sdp || sdp->detached)
+               return;
+-      delay = 0;
++      SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name));
++
++      /* Need a write lock to set sdp->detached. */
+       write_lock_irqsave(&sg_index_lock, iflags);
+-      if (sdp->headfp) {
+-              sdp->detached = 1;
+-              for (sfp = sdp->headfp; sfp; sfp = tsfp) {
+-                      tsfp = sfp->nextfp;
+-                      for (srp = sfp->headrp; srp; srp = tsrp) {
+-                              tsrp = srp->nextrp;
+-                              if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
+-                                      sg_finish_rem_req(srp);
+-                      }
+-                      if (sfp->closed) {
+-                              scsi_device_put(sdp->device);
+-                              __sg_remove_sfp(sdp, sfp);
+-                      } else {
+-                              delay = 1;
+-                              wake_up_interruptible(&sfp->read_wait);
+-                              kill_fasync(&sfp->async_qp, SIGPOLL,
+-                                          POLL_HUP);
+-                      }
+-              }
+-              SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d, dirty\n", sdp->index));
+-              if (NULL == sdp->headfp) {
+-                      idr_remove(&sg_index_idr, sdp->index);
+-              }
+-      } else {        /* nothing active, simple case */
+-              SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d\n", sdp->index));
+-              idr_remove(&sg_index_idr, sdp->index);
++      sdp->detached = 1;
++      for (sfp = sdp->headfp; sfp; sfp = sfp->nextfp) {
++              wake_up_interruptible(&sfp->read_wait);
++              kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
+       }
+       write_unlock_irqrestore(&sg_index_lock, iflags);
+@@ -1538,13 +1536,8 @@
+       device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
+       cdev_del(sdp->cdev);
+       sdp->cdev = NULL;
+-      put_disk(sdp->disk);
+-      sdp->disk = NULL;
+-      if (NULL == sdp->headfp)
+-              kfree(sdp);
+-      if (delay)
+-              msleep(10);     /* dirty detach so delay device destruction */
++      sg_put_dev(sdp);
+ }
+ module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
+@@ -1939,22 +1932,6 @@
+       return resp;
+ }
+-#ifdef CONFIG_SCSI_PROC_FS
+-static Sg_request *
+-sg_get_nth_request(Sg_fd * sfp, int nth)
+-{
+-      Sg_request *resp;
+-      unsigned long iflags;
+-      int k;
+-
+-      read_lock_irqsave(&sfp->rq_list_lock, iflags);
+-      for (k = 0, resp = sfp->headrp; resp && (k < nth);
+-           ++k, resp = resp->nextrp) ;
+-      read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+-      return resp;
+-}
+-#endif
+-
+ /* always adds to end of list */
+ static Sg_request *
+ sg_add_request(Sg_fd * sfp)
+@@ -2030,22 +2007,6 @@
+       return res;
+ }
+-#ifdef CONFIG_SCSI_PROC_FS
+-static Sg_fd *
+-sg_get_nth_sfp(Sg_device * sdp, int nth)
+-{
+-      Sg_fd *resp;
+-      unsigned long iflags;
+-      int k;
+-
+-      read_lock_irqsave(&sg_index_lock, iflags);
+-      for (k = 0, resp = sdp->headfp; resp && (k < nth);
+-           ++k, resp = resp->nextfp) ;
+-      read_unlock_irqrestore(&sg_index_lock, iflags);
+-      return resp;
+-}
+-#endif
+-
+ static Sg_fd *
+ sg_add_sfp(Sg_device * sdp, int dev)
+ {
+@@ -2060,6 +2021,7 @@
+       init_waitqueue_head(&sfp->read_wait);
+       rwlock_init(&sfp->rq_list_lock);
++      kref_init(&sfp->f_ref);
+       sfp->timeout = SG_DEFAULT_TIMEOUT;
+       sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
+       sfp->force_packid = SG_DEF_FORCE_PACK_ID;
+@@ -2087,15 +2049,54 @@
+       sg_build_reserve(sfp, bufflen);
+       SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp:   bufflen=%d, k_use_sg=%d\n",
+                          sfp->reserve.bufflen, sfp->reserve.k_use_sg));
++
++      kref_get(&sdp->d_ref);
++      __module_get(THIS_MODULE);
+       return sfp;
+ }
+-static void
+-__sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
++static void sg_remove_sfp_usercontext(struct work_struct *work)
+ {
++      struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
++      struct sg_device *sdp = sfp->parentdp;
++
++      /* Cleanup any responses which were never read(). */
++      while (sfp->headrp)
++              sg_finish_rem_req(sfp->headrp);
++
++      if (sfp->reserve.bufflen > 0) {
++              SCSI_LOG_TIMEOUT(6,
++                      printk("sg_remove_sfp:    bufflen=%d, k_use_sg=%d\n",
++                              (int) sfp->reserve.bufflen,
++                              (int) sfp->reserve.k_use_sg));
++              sg_remove_scat(&sfp->reserve);
++      }
++
++      SCSI_LOG_TIMEOUT(6,
++              printk("sg_remove_sfp: %s, sfp=0x%p\n",
++                      sdp->disk->disk_name,
++                      sfp));
++      kfree(sfp);
++
++      scsi_device_put(sdp->device);
++      sg_put_dev(sdp);
++      module_put(THIS_MODULE);
++}
++
++static void sg_remove_sfp(struct kref *kref)
++{
++      struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
++      struct sg_device *sdp = sfp->parentdp;
+       Sg_fd *fp;
+       Sg_fd *prev_fp;
++      unsigned long iflags;
++
++      /* CAUTION!  Note that sfp can still be found by walking sdp->headfp
++       * even though the refcount is now 0.  Therefore, unlink sfp from
++       * sdp->headfp BEFORE doing any other cleanup.
++       */
++      write_lock_irqsave(&sg_index_lock, iflags);
+       prev_fp = sdp->headfp;
+       if (sfp == prev_fp)
+               sdp->headfp = prev_fp->nextfp;
+@@ -2108,54 +2109,10 @@
+                       prev_fp = fp;
+               }
+       }
+-      if (sfp->reserve.bufflen > 0) {
+-              SCSI_LOG_TIMEOUT(6, 
+-                      printk("__sg_remove_sfp:    bufflen=%d, k_use_sg=%d\n",
+-                      (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
+-              sg_remove_scat(&sfp->reserve);
+-      }
+-      sfp->parentdp = NULL;
+-      SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp:    sfp=0x%p\n", sfp));
+-      kfree(sfp);
+-}
+-
+-/* Returns 0 in normal case, 1 when detached and sdp object removed */
+-static int
+-sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
+-{
+-      Sg_request *srp;
+-      Sg_request *tsrp;
+-      int dirty = 0;
+-      int res = 0;
+-
+-      for (srp = sfp->headrp; srp; srp = tsrp) {
+-              tsrp = srp->nextrp;
+-              if (sg_srp_done(srp, sfp))
+-                      sg_finish_rem_req(srp);
+-              else
+-                      ++dirty;
+-      }
+-      if (0 == dirty) {
+-              unsigned long iflags;
++      write_unlock_irqrestore(&sg_index_lock, iflags);
++      wake_up_interruptible(&sdp->o_excl_wait);
+-              write_lock_irqsave(&sg_index_lock, iflags);
+-              __sg_remove_sfp(sdp, sfp);
+-              if (sdp->detached && (NULL == sdp->headfp)) {
+-                      idr_remove(&sg_index_idr, sdp->index);
+-                      kfree(sdp);
+-                      res = 1;
+-              }
+-              write_unlock_irqrestore(&sg_index_lock, iflags);
+-      } else {
+-              /* MOD_INC's to inhibit unloading sg and associated adapter driver */
+-              /* only bump the access_count if we actually succeeded in
+-               * throwing another counter on the host module */
+-              scsi_device_get(sdp->device);   /* XXX: retval ignored? */      
+-              sfp->closed = 1;        /* flag dirty state on this fd */
+-              SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
+-                                dirty));
+-      }
+-      return res;
++      execute_in_process_context(sg_remove_sfp_usercontext, &sfp->ew);
+ }
+ static int
+@@ -2197,19 +2154,38 @@
+ }
+ #endif
+-static Sg_device *
+-sg_get_dev(int dev)
++/* must be called with sg_index_lock held */
++static Sg_device *sg_lookup_dev(int dev)
+ {
+-      Sg_device *sdp;
+-      unsigned long iflags;
++      return idr_find(&sg_index_idr, dev);
++}
+-      read_lock_irqsave(&sg_index_lock, iflags);
+-      sdp = idr_find(&sg_index_idr, dev);
+-      read_unlock_irqrestore(&sg_index_lock, iflags);
++static Sg_device *sg_get_dev(int dev)
++{
++      struct sg_device *sdp;
++      unsigned long flags;
++
++      read_lock_irqsave(&sg_index_lock, flags);
++      sdp = sg_lookup_dev(dev);
++      if (!sdp)
++              sdp = ERR_PTR(-ENXIO);
++      else if (sdp->detached) {
++              /* If sdp->detached, then the refcount may already be 0, in
++               * which case it would be a bug to do kref_get().
++               */
++              sdp = ERR_PTR(-ENODEV);
++      } else
++              kref_get(&sdp->d_ref);
++      read_unlock_irqrestore(&sg_index_lock, flags);
+       return sdp;
+ }
++static void sg_put_dev(struct sg_device *sdp)
++{
++      kref_put(&sdp->d_ref, sg_device_destroy);
++}
++
+ #ifdef CONFIG_SCSI_PROC_FS
+ static struct proc_dir_entry *sg_proc_sgp = NULL;
+@@ -2466,8 +2442,10 @@
+       struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
+       Sg_device *sdp;
+       struct scsi_device *scsidp;
++      unsigned long iflags;
+-      sdp = it ? sg_get_dev(it->index) : NULL;
++      read_lock_irqsave(&sg_index_lock, iflags);
++      sdp = it ? sg_lookup_dev(it->index) : NULL;
+       if (sdp && (scsidp = sdp->device) && (!sdp->detached))
+               seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
+                             scsidp->host->host_no, scsidp->channel,
+@@ -2478,6 +2456,7 @@
+                             (int) scsi_device_online(scsidp));
+       else
+               seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
++      read_unlock_irqrestore(&sg_index_lock, iflags);
+       return 0;
+ }
+@@ -2491,16 +2470,20 @@
+       struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
+       Sg_device *sdp;
+       struct scsi_device *scsidp;
++      unsigned long iflags;
+-      sdp = it ? sg_get_dev(it->index) : NULL;
++      read_lock_irqsave(&sg_index_lock, iflags);
++      sdp = it ? sg_lookup_dev(it->index) : NULL;
+       if (sdp && (scsidp = sdp->device) && (!sdp->detached))
+               seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
+                          scsidp->vendor, scsidp->model, scsidp->rev);
+       else
+               seq_printf(s, "<no active device>\n");
++      read_unlock_irqrestore(&sg_index_lock, iflags);
+       return 0;
+ }
++/* must be called while holding sg_index_lock */
+ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
+ {
+       int k, m, new_interface, blen, usg;
+@@ -2510,7 +2493,8 @@
+       const char * cp;
+       unsigned int ms;
+-      for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
++      for (k = 0, fp = sdp->headfp; fp != NULL; ++k, fp = fp->nextfp) {
++              read_lock(&fp->rq_list_lock); /* irqs already disabled */
+               seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
+                          "(res)sgat=%d low_dma=%d\n", k + 1,
+                          jiffies_to_msecs(fp->timeout),
+@@ -2520,7 +2504,9 @@
+               seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
+                          (int) fp->cmd_q, (int) fp->force_packid,
+                          (int) fp->keep_orphan, (int) fp->closed);
+-              for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
++              for (m = 0, srp = fp->headrp;
++                              srp != NULL;
++                              ++m, srp = srp->nextrp) {
+                       hp = &srp->header;
+                       new_interface = (hp->interface_id == '\0') ? 0 : 1;
+                       if (srp->res_used) {
+@@ -2557,6 +2543,7 @@
+               }
+               if (0 == m)
+                       seq_printf(s, "     No requests active\n");
++              read_unlock(&fp->rq_list_lock);
+       }
+ }
+@@ -2569,39 +2556,34 @@
+ {
+       struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
+       Sg_device *sdp;
++      unsigned long iflags;
+       if (it && (0 == it->index)) {
+               seq_printf(s, "max_active_device=%d(origin 1)\n",
+                          (int)it->max);
+               seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
+       }
+-      sdp = it ? sg_get_dev(it->index) : NULL;
+-      if (sdp) {
+-              struct scsi_device *scsidp = sdp->device;
+-              if (NULL == scsidp) {
+-                      seq_printf(s, "device %d detached ??\n", 
+-                                 (int)it->index);
+-                      return 0;
+-              }
++      read_lock_irqsave(&sg_index_lock, iflags);
++      sdp = it ? sg_lookup_dev(it->index) : NULL;
++      if (sdp && sdp->headfp) {
++              struct scsi_device *scsidp = sdp->device;
+-              if (sg_get_nth_sfp(sdp, 0)) {
+-                      seq_printf(s, " >>> device=%s ",
+-                              sdp->disk->disk_name);
+-                      if (sdp->detached)
+-                              seq_printf(s, "detached pending close ");
+-                      else
+-                              seq_printf
+-                                  (s, "scsi%d chan=%d id=%d lun=%d   em=%d",
+-                                   scsidp->host->host_no,
+-                                   scsidp->channel, scsidp->id,
+-                                   scsidp->lun,
+-                                   scsidp->host->hostt->emulated);
+-                      seq_printf(s, " sg_tablesize=%d excl=%d\n",
+-                                 sdp->sg_tablesize, sdp->exclude);
+-              }
++              seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
++              if (sdp->detached)
++                      seq_printf(s, "detached pending close ");
++              else
++                      seq_printf
++                          (s, "scsi%d chan=%d id=%d lun=%d   em=%d",
++                           scsidp->host->host_no,
++                           scsidp->channel, scsidp->id,
++                           scsidp->lun,
++                           scsidp->host->hostt->emulated);
++              seq_printf(s, " sg_tablesize=%d excl=%d\n",
++                         sdp->sg_tablesize, sdp->exclude);
+               sg_proc_debug_helper(s, sdp);
+       }
++      read_unlock_irqrestore(&sg_index_lock, iflags);
+       return 0;
+ }
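
The sg.c rework above replaces the old sfp->closed / headfp bookkeeping with reference counting: sg_device gains d_ref, sg_fd gains f_ref, and the final kref_put() runs the destructor. This is the standard kernel kref pattern; a hedged, self-contained sketch with hypothetical names (not the patch's own code):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
        struct kref ref;
        /* ... per-device state ... */
};

static void example_obj_release(struct kref *kref)
{
        struct example_obj *obj = container_of(kref, struct example_obj, ref);

        /* As sg_device_destroy() does with idr_remove(): unpublish the
         * object from any lookup structure before freeing it, so a racing
         * lookup cannot hand out a pointer whose refcount is already 0. */
        kfree(obj);
}

static struct example_obj *example_obj_alloc(void)
{
        struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                kref_init(&obj->ref);   /* refcount starts at 1 */
        return obj;
}

static void example_obj_get(struct example_obj *obj)
{
        kref_get(&obj->ref);
}

static void example_obj_put(struct example_obj *obj)
{
        kref_put(&obj->ref, example_obj_release);       /* may free */
}

The sg_get_dev()/sg_put_dev() pair in the hunk is exactly this get/put, with the extra rule that kref_get() only happens under sg_index_lock and only while the device is not flagged detached.
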
+--- kernel-maemo-2.6.28.test.orig/drivers/serial/8250.c
++++ kernel-maemo-2.6.28.test/drivers/serial/8250.c
+@@ -2028,6 +2028,20 @@
+       serial8250_set_mctrl(&up->port, up->port.mctrl);
++      /* Serial over Lan (SoL) hack:
++         Intel 8257x Gigabit ethernet chips have a
++         16550 emulation, to be used for Serial Over Lan.
++         Those chips take a longer time than a normal
++         serial device to signalize that a transmission
++         data was queued. Due to that, the above test generally
++         fails. One solution would be to delay the reading of
++         iir. However, this is not reliable, since the timeout
++         is variable. So, let's just don't test if we receive
++         TX irq. This way, we'll never enable UART_BUG_TXEN.
++       */
++      if (up->port.flags & UPF_NO_TXEN_TEST)
++              goto dont_test_tx_en;
++
+       /*
+        * Do a quick test to see if we receive an
+        * interrupt when we enable the TX irq.
+@@ -2047,6 +2061,7 @@
+               up->bugs &= ~UART_BUG_TXEN;
+       }
++dont_test_tx_en:
+       spin_unlock_irqrestore(&up->port.lock, flags);
+       /*
+--- kernel-maemo-2.6.28.test.orig/drivers/serial/8250_pci.c
++++ kernel-maemo-2.6.28.test/drivers/serial/8250_pci.c
+@@ -758,6 +758,21 @@
+       return setup_port(priv, port, bar, offset, board->reg_shift);
+ }
++static int skip_tx_en_setup(struct serial_private *priv,
++                      const struct pciserial_board *board,
++                      struct uart_port *port, int idx)
++{
++      port->flags |= UPF_NO_TXEN_TEST;
++      printk(KERN_DEBUG "serial8250: skipping TxEn test for device "
++                        "[%04x:%04x] subsystem [%04x:%04x]\n",
++                        priv->dev->vendor,
++                        priv->dev->device,
++                        priv->dev->subsystem_vendor,
++                        priv->dev->subsystem_device);
++
++      return pci_default_setup(priv, board, port, idx);
++}
++
+ /* This should be in linux/pci_ids.h */
+ #define PCI_VENDOR_ID_SBSMODULARIO    0x124B
+ #define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
+@@ -766,6 +781,8 @@
+ #define PCI_SUBDEVICE_ID_OCTPRO422    0x0208
+ #define PCI_SUBDEVICE_ID_POCTAL232    0x0308
+ #define PCI_SUBDEVICE_ID_POCTAL422    0x0408
++#define PCI_VENDOR_ID_ADVANTECH               0x13fe
++#define PCI_DEVICE_ID_ADVANTECH_PCI3620       0x3620
+ /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584       0x1584
+@@ -822,6 +839,27 @@
+               .init           = pci_inteli960ni_init,
+               .setup          = pci_default_setup,
+       },
++      {
++              .vendor         = PCI_VENDOR_ID_INTEL,
++              .device         = PCI_DEVICE_ID_INTEL_8257X_SOL,
++              .subvendor      = PCI_ANY_ID,
++              .subdevice      = PCI_ANY_ID,
++              .setup          = skip_tx_en_setup,
++      },
++      {
++              .vendor         = PCI_VENDOR_ID_INTEL,
++              .device         = PCI_DEVICE_ID_INTEL_82573L_SOL,
++              .subvendor      = PCI_ANY_ID,
++              .subdevice      = PCI_ANY_ID,
++              .setup          = skip_tx_en_setup,
++      },
++      {
++              .vendor         = PCI_VENDOR_ID_INTEL,
++              .device         = PCI_DEVICE_ID_INTEL_82573E_SOL,
++              .subvendor      = PCI_ANY_ID,
++              .subdevice      = PCI_ANY_ID,
++              .setup          = skip_tx_en_setup,
++      },
+       /*
+        * ITE
+        */
+@@ -2132,6 +2170,10 @@
+ #endif
+ static struct pci_device_id serial_pci_tbl[] = {
++      /* Advantech use PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */
++      {       PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620,
++              PCI_DEVICE_ID_ADVANTECH_PCI3620, 0x0001, 0, 0,
++              pbn_b2_8_921600 },
+       {       PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V960,
+               PCI_SUBVENDOR_ID_CONNECT_TECH,
+               PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232, 0, 0,
+@@ -2271,6 +2313,9 @@
+       {       PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_COMM8,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_b2_8_115200 },
++      {       PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_7803,
++              PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++              pbn_b2_8_460800 },
+       {       PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM8,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_b2_8_115200 },
+@@ -2372,6 +2417,9 @@
+                * For now just used the hex ID 0x950a.
+                */
+       {       PCI_VENDOR_ID_OXSEMI, 0x950a,
++              PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL, 0, 0,
++              pbn_b0_2_115200 },
++      {       PCI_VENDOR_ID_OXSEMI, 0x950a,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_b0_2_1130000 },
+       {       PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
+--- kernel-maemo-2.6.28.test.orig/drivers/serial/jsm/jsm_driver.c
++++ kernel-maemo-2.6.28.test/drivers/serial/jsm/jsm_driver.c
+@@ -84,6 +84,8 @@
+       brd->pci_dev = pdev;
+       if (pdev->device == PCIE_DEVICE_ID_NEO_4_IBM)
+               brd->maxports = 4;
++      else if (pdev->device == PCI_DEVICE_ID_DIGI_NEO_8)
++              brd->maxports = 8;
+       else
+               brd->maxports = 2;
+@@ -212,6 +214,7 @@
+       { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45), 0, 0, 2 },
+       { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45PRI), 0, 0, 3 },
+       { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_4_IBM), 0, 0, 4 },
++      { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_NEO_8), 0, 0, 5 },
+       { 0, }
+ };
+ MODULE_DEVICE_TABLE(pci, jsm_pci_tbl);
+--- kernel-maemo-2.6.28.test.orig/drivers/serial/jsm/jsm_tty.c
++++ kernel-maemo-2.6.28.test/drivers/serial/jsm/jsm_tty.c
+@@ -161,6 +161,11 @@
+       channel->ch_bd->bd_ops->disable_receiver(channel);
+ }
++static void jsm_tty_enable_ms(struct uart_port *port)
++{
++      /* Nothing needed */
++}
++
+ static void jsm_tty_break(struct uart_port *port, int break_state)
+ {
+       unsigned long lock_flags;
+@@ -345,6 +350,7 @@
+       .start_tx       = jsm_tty_start_tx,
+       .send_xchar     = jsm_tty_send_xchar,
+       .stop_rx        = jsm_tty_stop_rx,
++      .enable_ms      = jsm_tty_enable_ms,
+       .break_ctl      = jsm_tty_break,
+       .startup        = jsm_tty_open,
+       .shutdown       = jsm_tty_close,
+--- kernel-maemo-2.6.28.test.orig/drivers/spi/spi.c
++++ kernel-maemo-2.6.28.test/drivers/spi/spi.c
+@@ -660,7 +660,7 @@
+       int                     status;
+       struct spi_message      message;
+-      struct spi_transfer     x;
++      struct spi_transfer     x[2];
+       u8                      *local_buf;
+       /* Use preallocated DMA-safe buffer.  We can't avoid copying here,
+@@ -671,9 +671,15 @@
+               return -EINVAL;
+       spi_message_init(&message);
+-      memset(&x, 0, sizeof x);
+-      x.len = n_tx + n_rx;
+-      spi_message_add_tail(&x, &message);
++      memset(x, 0, sizeof x);
++      if (n_tx) {
++              x[0].len = n_tx;
++              spi_message_add_tail(&x[0], &message);
++      }
++      if (n_rx) {
++              x[1].len = n_rx;
++              spi_message_add_tail(&x[1], &message);
++      }
+       /* ... unless someone else is using the pre-allocated buffer */
+       if (!mutex_trylock(&lock)) {
+@@ -684,15 +690,15 @@
+               local_buf = buf;
+       memcpy(local_buf, txbuf, n_tx);
+-      x.tx_buf = local_buf;
+-      x.rx_buf = local_buf;
++      x[0].tx_buf = local_buf;
++      x[1].rx_buf = local_buf + n_tx;
+       /* do the i/o */
+       status = spi_sync(spi, &message);
+       if (status == 0)
+-              memcpy(rxbuf, x.rx_buf + n_tx, n_rx);
++              memcpy(rxbuf, x[1].rx_buf, n_rx);
+-      if (x.tx_buf == buf)
++      if (x[0].tx_buf == buf)
+               mutex_unlock(&lock);
+       else
+               kfree(local_buf);
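
The spi.c fix above splits spi_write_then_read()'s single combined transfer into one TX transfer and one RX transfer, so the read phase no longer starts with n_tx bytes of dummy clocking at the device. Building such a two-part message in a caller looks roughly like the sketch below (hypothetical helper; transfer data buffers must be DMA-safe, which is why the patched helper keeps its preallocated bounce buffer):

#include <linux/spi/spi.h>
#include <linux/string.h>

static int example_cmd_then_reply(struct spi_device *spi,
                                  const void *cmd, unsigned cmd_len,
                                  void *reply, unsigned reply_len)
{
        struct spi_transfer t[2];
        struct spi_message m;

        spi_message_init(&m);
        memset(t, 0, sizeof(t));

        t[0].tx_buf = cmd;              /* write phase */
        t[0].len = cmd_len;
        spi_message_add_tail(&t[0], &m);

        t[1].rx_buf = reply;            /* read phase */
        t[1].len = reply_len;
        spi_message_add_tail(&t[1], &m);

        return spi_sync(spi, &m);       /* blocks until both transfers finish */
}
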
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/class/cdc-acm.c
++++ kernel-maemo-2.6.28.test/drivers/usb/class/cdc-acm.c
+@@ -1370,6 +1370,8 @@
+       { USB_DEVICE(0x0572, 0x1321), /* Conexant USB MODEM CX93010 */
+       .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+       },
++      { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
++      },
+       /* control interfaces with various AT-command sets */
+       { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/class/cdc-wdm.c
++++ kernel-maemo-2.6.28.test/drivers/usb/class/cdc-wdm.c
+@@ -652,7 +652,7 @@
+       iface = &intf->altsetting[0];
+       ep = &iface->endpoint[0].desc;
+-      if (!usb_endpoint_is_int_in(ep)) {
++      if (!ep || !usb_endpoint_is_int_in(ep)) {
+               rv = -EINVAL;
+               goto err;
+       }
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/class/usbtmc.c
++++ kernel-maemo-2.6.28.test/drivers/usb/class/usbtmc.c
+@@ -49,6 +49,7 @@
+ static struct usb_device_id usbtmc_devices[] = {
+       { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), },
++      { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 1), },
+       { 0, } /* terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, usbtmc_devices);
+@@ -105,12 +106,13 @@
+ {
+       struct usb_interface *intf;
+       struct usbtmc_device_data *data;
+-      int retval = -ENODEV;
++      int retval = 0;
+       intf = usb_find_interface(&usbtmc_driver, iminor(inode));
+       if (!intf) {
+               printk(KERN_ERR KBUILD_MODNAME
+                      ": can not find device for minor %d", iminor(inode));
++              retval = -ENODEV;
+               goto exit;
+       }
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/core/devio.c
++++ kernel-maemo-2.6.28.test/drivers/usb/core/devio.c
+@@ -359,11 +359,6 @@
+               spin_lock_irqsave(&ps->lock, flags);
+       }
+       spin_unlock_irqrestore(&ps->lock, flags);
+-      as = async_getcompleted(ps);
+-      while (as) {
+-              free_async(as);
+-              as = async_getcompleted(ps);
+-      }
+ }
+ static void destroy_async_on_interface(struct dev_state *ps,
+@@ -642,6 +637,7 @@
+       struct dev_state *ps = file->private_data;
+       struct usb_device *dev = ps->dev;
+       unsigned int ifnum;
++      struct async *as;
+       usb_lock_device(dev);
+@@ -660,6 +656,12 @@
+       usb_unlock_device(dev);
+       usb_put_dev(dev);
+       put_pid(ps->disc_pid);
++
++      as = async_getcompleted(ps);
++      while (as) {
++              free_async(as);
++              as = async_getcompleted(ps);
++      }
+       kfree(ps);
+       return 0;
+ }
+@@ -1703,7 +1705,7 @@
+       .release =      usbdev_release,
+ };
+-void usb_fs_classdev_common_remove(struct usb_device *udev)
++static void usbdev_remove(struct usb_device *udev)
+ {
+       struct dev_state *ps;
+       struct siginfo sinfo;
+@@ -1745,10 +1747,15 @@
+ {
+       if (dev->usb_classdev)
+               device_unregister(dev->usb_classdev);
+-      usb_fs_classdev_common_remove(dev);
+ }
+-static int usb_classdev_notify(struct notifier_block *self,
++#else
++#define usb_classdev_add(dev)         0
++#define usb_classdev_remove(dev)      do {} while (0)
++
++#endif
++
++static int usbdev_notify(struct notifier_block *self,
+                              unsigned long action, void *dev)
+ {
+       switch (action) {
+@@ -1758,15 +1765,15 @@
+               break;
+       case USB_DEVICE_REMOVE:
+               usb_classdev_remove(dev);
++              usbdev_remove(dev);
+               break;
+       }
+       return NOTIFY_OK;
+ }
+ static struct notifier_block usbdev_nb = {
+-      .notifier_call =        usb_classdev_notify,
++      .notifier_call =        usbdev_notify,
+ };
+-#endif
+ static struct cdev usb_device_cdev;
+@@ -1801,9 +1808,8 @@
+        * to /sys/dev
+        */
+       usb_classdev_class->dev_kobj = NULL;
+-
+-      usb_register_notify(&usbdev_nb);
+ #endif
++      usb_register_notify(&usbdev_nb);
+ out:
+       return retval;
+@@ -1814,8 +1820,8 @@
+ void usb_devio_cleanup(void)
+ {
+-#ifdef CONFIG_USB_DEVICE_CLASS
+       usb_unregister_notify(&usbdev_nb);
++#ifdef CONFIG_USB_DEVICE_CLASS
+       class_destroy(usb_classdev_class);
+ #endif
+       cdev_del(&usb_device_cdev);
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/core/driver.c
++++ kernel-maemo-2.6.28.test/drivers/usb/core/driver.c
+@@ -269,7 +269,7 @@
+        * supports "soft" unbinding.
+        */
+       if (!driver->soft_unbind)
+-              usb_disable_interface(udev, intf);
++              usb_disable_interface(udev, intf, false);
+       driver->disconnect(intf);
+@@ -279,9 +279,12 @@
+        * altsetting means creating new endpoint device entries).
+        * When either of these happens, defer the Set-Interface.
+        */
+-      if (intf->cur_altsetting->desc.bAlternateSetting == 0)
+-              ;       /* Already in altsetting 0 so skip Set-Interface */
+-      else if (!error && intf->dev.power.status == DPM_ON)
++      if (intf->cur_altsetting->desc.bAlternateSetting == 0) {
++              /* Already in altsetting 0 so skip Set-Interface.
++               * Just re-enable it without affecting the endpoint toggles.
++               */
++              usb_enable_interface(udev, intf, false);
++      } else if (!error && intf->dev.power.status == DPM_ON)
+               usb_set_interface(udev, intf->altsetting[0].
+                               desc.bInterfaceNumber, 0);
+       else
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/core/hub.c
++++ kernel-maemo-2.6.28.test/drivers/usb/core/hub.c
+@@ -2383,9 +2383,9 @@
+ void usb_ep0_reinit(struct usb_device *udev)
+ {
+-      usb_disable_endpoint(udev, 0 + USB_DIR_IN);
+-      usb_disable_endpoint(udev, 0 + USB_DIR_OUT);
+-      usb_enable_endpoint(udev, &udev->ep0);
++      usb_disable_endpoint(udev, 0 + USB_DIR_IN, true);
++      usb_disable_endpoint(udev, 0 + USB_DIR_OUT, true);
++      usb_enable_endpoint(udev, &udev->ep0, true);
+ }
+ EXPORT_SYMBOL_GPL(usb_ep0_reinit);
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/core/inode.c
++++ kernel-maemo-2.6.28.test/drivers/usb/core/inode.c
+@@ -718,7 +718,6 @@
+               fs_remove_file (dev->usbfs_dentry);
+               dev->usbfs_dentry = NULL;
+       }
+-      usb_fs_classdev_common_remove(dev);
+ }
+ static int usbfs_notify(struct notifier_block *self, unsigned long action, void *dev)
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/core/message.c
++++ kernel-maemo-2.6.28.test/drivers/usb/core/message.c
+@@ -651,7 +651,7 @@
+               if (result <= 0 && result != -ETIMEDOUT)
+                       continue;
+               if (result > 1 && ((u8 *)buf)[1] != type) {
+-                      result = -EPROTO;
++                      result = -ENODATA;
+                       continue;
+               }
+               break;
+@@ -694,8 +694,13 @@
+                       USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
+                       (USB_DT_STRING << 8) + index, langid, buf, size,
+                       USB_CTRL_GET_TIMEOUT);
+-              if (!(result == 0 || result == -EPIPE))
+-                      break;
++              if (result == 0 || result == -EPIPE)
++                      continue;
++              if (result > 1 && ((u8 *) buf)[1] != USB_DT_STRING) {
++                      result = -ENODATA;
++                      continue;
++              }
++              break;
+       }
+       return result;
+ }
+@@ -1009,14 +1014,15 @@
+  * @dev: the device whose endpoint is being disabled
+  * @epaddr: the endpoint's address.  Endpoint number for output,
+  *    endpoint number + USB_DIR_IN for input
++ * @reset_hardware: flag to erase any endpoint state stored in the
++ *    controller hardware
+  *
+- * Deallocates hcd/hardware state for this endpoint ... and nukes all
+- * pending urbs.
+- *
+- * If the HCD hasn't registered a disable() function, this sets the
+- * endpoint's maxpacket size to 0 to prevent further submissions.
++ * Disables the endpoint for URB submission and nukes all pending URBs.
++ * If @reset_hardware is set then also deallocates hcd/hardware state
++ * for the endpoint.
+  */
+-void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr)
++void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
++              bool reset_hardware)
+ {
+       unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK;
+       struct usb_host_endpoint *ep;
+@@ -1026,15 +1032,18 @@
+       if (usb_endpoint_out(epaddr)) {
+               ep = dev->ep_out[epnum];
+-              dev->ep_out[epnum] = NULL;
++              if (reset_hardware)
++                      dev->ep_out[epnum] = NULL;
+       } else {
+               ep = dev->ep_in[epnum];
+-              dev->ep_in[epnum] = NULL;
++              if (reset_hardware)
++                      dev->ep_in[epnum] = NULL;
+       }
+       if (ep) {
+               ep->enabled = 0;
+               usb_hcd_flush_endpoint(dev, ep);
+-              usb_hcd_disable_endpoint(dev, ep);
++              if (reset_hardware)
++                      usb_hcd_disable_endpoint(dev, ep);
+       }
+ }
+@@ -1042,17 +1051,21 @@
+  * usb_disable_interface -- Disable all endpoints for an interface
+  * @dev: the device whose interface is being disabled
+  * @intf: pointer to the interface descriptor
++ * @reset_hardware: flag to erase any endpoint state stored in the
++ *    controller hardware
+  *
+  * Disables all the endpoints for the interface's current altsetting.
+  */
+-void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf)
++void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
++              bool reset_hardware)
+ {
+       struct usb_host_interface *alt = intf->cur_altsetting;
+       int i;
+       for (i = 0; i < alt->desc.bNumEndpoints; ++i) {
+               usb_disable_endpoint(dev,
+-                              alt->endpoint[i].desc.bEndpointAddress);
++                              alt->endpoint[i].desc.bEndpointAddress,
++                              reset_hardware);
+       }
+ }
+@@ -1073,8 +1086,8 @@
+       dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
+               skip_ep0 ? "non-ep0" : "all");
+       for (i = skip_ep0; i < 16; ++i) {
+-              usb_disable_endpoint(dev, i);
+-              usb_disable_endpoint(dev, i + USB_DIR_IN);
++              usb_disable_endpoint(dev, i, true);
++              usb_disable_endpoint(dev, i + USB_DIR_IN, true);
+       }
+       dev->toggle[0] = dev->toggle[1] = 0;
+@@ -1113,22 +1126,26 @@
+  * usb_enable_endpoint - Enable an endpoint for USB communications
+  * @dev: the device whose interface is being enabled
+  * @ep: the endpoint
++ * @reset_toggle: flag to set the endpoint's toggle back to 0
+  *
+- * Resets the endpoint toggle, and sets dev->ep_{in,out} pointers.
++ * Resets the endpoint toggle if asked, and sets dev->ep_{in,out} pointers.
+  * For control endpoints, both the input and output sides are handled.
+  */
+-void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep)
++void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep,
++              bool reset_toggle)
+ {
+       int epnum = usb_endpoint_num(&ep->desc);
+       int is_out = usb_endpoint_dir_out(&ep->desc);
+       int is_control = usb_endpoint_xfer_control(&ep->desc);
+       if (is_out || is_control) {
+-              usb_settoggle(dev, epnum, 1, 0);
++              if (reset_toggle)
++                      usb_settoggle(dev, epnum, 1, 0);
+               dev->ep_out[epnum] = ep;
+       }
+       if (!is_out || is_control) {
+-              usb_settoggle(dev, epnum, 0, 0);
++              if (reset_toggle)
++                      usb_settoggle(dev, epnum, 0, 0);
+               dev->ep_in[epnum] = ep;
+       }
+       ep->enabled = 1;
+@@ -1138,17 +1155,18 @@
+  * usb_enable_interface - Enable all the endpoints for an interface
+  * @dev: the device whose interface is being enabled
+  * @intf: pointer to the interface descriptor
++ * @reset_toggles: flag to set the endpoints' toggles back to 0
+  *
+  * Enables all the endpoints for the interface's current altsetting.
+  */
+-static void usb_enable_interface(struct usb_device *dev,
+-                               struct usb_interface *intf)
++void usb_enable_interface(struct usb_device *dev,
++              struct usb_interface *intf, bool reset_toggles)
+ {
+       struct usb_host_interface *alt = intf->cur_altsetting;
+       int i;
+       for (i = 0; i < alt->desc.bNumEndpoints; ++i)
+-              usb_enable_endpoint(dev, &alt->endpoint[i]);
++              usb_enable_endpoint(dev, &alt->endpoint[i], reset_toggles);
+ }
+ /**
+@@ -1237,7 +1255,7 @@
+       /* prevent submissions using previous endpoint settings */
+       if (iface->cur_altsetting != alt)
+               usb_remove_sysfs_intf_files(iface);
+-      usb_disable_interface(dev, iface);
++      usb_disable_interface(dev, iface, true);
+       iface->cur_altsetting = alt;
+@@ -1271,7 +1289,7 @@
+        * during the SETUP stage - hence EP0 toggles are "don't care" here.
+        * (Likewise, EP0 never "halts" on well designed devices.)
+        */
+-      usb_enable_interface(dev, iface);
++      usb_enable_interface(dev, iface, true);
+       if (device_is_registered(&iface->dev))
+               usb_create_sysfs_intf_files(iface);
+@@ -1315,8 +1333,8 @@
+        */
+       for (i = 1; i < 16; ++i) {
+-              usb_disable_endpoint(dev, i);
+-              usb_disable_endpoint(dev, i + USB_DIR_IN);
++              usb_disable_endpoint(dev, i, true);
++              usb_disable_endpoint(dev, i + USB_DIR_IN, true);
+       }
+       config = dev->actconfig;
+@@ -1346,7 +1364,7 @@
+                       alt = &intf->altsetting[0];
+               intf->cur_altsetting = alt;
+-              usb_enable_interface(dev, intf);
++              usb_enable_interface(dev, intf, true);
+               if (device_is_registered(&intf->dev))
+                       usb_create_sysfs_intf_files(intf);
+       }
+@@ -1604,7 +1622,7 @@
+                       alt = &intf->altsetting[0];
+               intf->cur_altsetting = alt;
+-              usb_enable_interface(dev, intf);
++              usb_enable_interface(dev, intf, true);
+               intf->dev.parent = &dev->dev;
+               intf->dev.driver = NULL;
+               intf->dev.bus = &usb_bus_type;
+@@ -1619,7 +1637,8 @@
+       }
+       kfree(new_interfaces);
+-      if (cp->string == NULL)
++      if (cp->string == NULL &&
++                      !(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS))
+               cp->string = usb_cache_string(dev, cp->desc.iConfiguration);
+       /* Now that all the interfaces are set up, register them
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/core/quirks.c
++++ kernel-maemo-2.6.28.test/drivers/usb/core/quirks.c
+@@ -54,6 +54,10 @@
+       { USB_DEVICE(0x0638, 0x0a13), .driver_info =
+         USB_QUIRK_STRING_FETCH_255 },
++      /* Saitek Cyborg Gold Joystick */
++      { USB_DEVICE(0x06a3, 0x0006), .driver_info =
++                      USB_QUIRK_CONFIG_INTF_STRINGS },
++
+       /* M-Systems Flash Disk Pioneers */
+       { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/core/sysfs.c
++++ kernel-maemo-2.6.28.test/drivers/usb/core/sysfs.c
+@@ -13,6 +13,7 @@
+ #include <linux/kernel.h>
+ #include <linux/string.h>
+ #include <linux/usb.h>
++#include <linux/usb/quirks.h>
+ #include "usb.h"
+ /* Active configuration fields */
+@@ -847,7 +848,8 @@
+        * and missing in others.  Hence its attribute cannot be created
+        * before the uevent is broadcast.
+        */
+-      if (alt->string == NULL)
++      if (alt->string == NULL &&
++                      !(udev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS))
+               alt->string = usb_cache_string(udev, alt->desc.iInterface);
+       if (alt->string)
+               retval = device_create_file(&intf->dev, &dev_attr_interface);
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/core/usb.c
++++ kernel-maemo-2.6.28.test/drivers/usb/core/usb.c
+@@ -362,7 +362,7 @@
+       dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
+       dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT;
+       /* ep0 maxpacket comes later, from device descriptor */
+-      usb_enable_endpoint(dev, &dev->ep0);
++      usb_enable_endpoint(dev, &dev->ep0, true);
+       dev->can_submit = 1;
+       /* Save readable and stable topology id, distinguishing devices
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/core/usb.h
++++ kernel-maemo-2.6.28.test/drivers/usb/core/usb.h
+@@ -10,10 +10,13 @@
+ extern void usb_remove_ep_files(struct usb_host_endpoint *endpoint);
+ extern void usb_enable_endpoint(struct usb_device *dev,
+-              struct usb_host_endpoint *ep);
+-extern void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr);
++              struct usb_host_endpoint *ep, bool reset_toggle);
++extern void usb_enable_interface(struct usb_device *dev,
++              struct usb_interface *intf, bool reset_toggles);
++extern void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
++              bool reset_hardware);
+ extern void usb_disable_interface(struct usb_device *dev,
+-              struct usb_interface *intf);
++              struct usb_interface *intf, bool reset_hardware);
+ extern void usb_release_interface_cache(struct kref *ref);
+ extern void usb_disable_device(struct usb_device *dev, int skip_ep0);
+ extern int usb_deauthorize_device(struct usb_device *);
+@@ -145,7 +148,6 @@
+ extern const struct file_operations usbfs_devices_fops;
+ extern const struct file_operations usbdev_file_operations;
+ extern void usbfs_conn_disc_event(void);
+-extern void usb_fs_classdev_common_remove(struct usb_device *udev);
+ extern int usb_devio_init(void);
+ extern void usb_devio_cleanup(void);
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/gadget/f_rndis.c
++++ kernel-maemo-2.6.28.test/drivers/usb/gadget/f_rndis.c
+@@ -437,7 +437,7 @@
+               DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, w_index, w_length);
+-              req->zero = 0;
++              req->zero = (value < w_length);
+               req->length = value;
+               value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+               if (value < 0)
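
The f_rndis.c one-liner above encodes the usual control-transfer rule: when the data stage returned to the host is shorter than wLength and happens to be a multiple of ep0's maxpacket size, a zero-length packet must terminate the transfer, and req->zero is how the function asks the UDC to send it. A hedged sketch of the same idiom in a composite-gadget setup handler (hypothetical function name, assuming the 2.6.28 composite framework):

#include <linux/gfp.h>
#include <linux/usb/composite.h>

static int example_ep0_reply(struct usb_composite_dev *cdev,
                             int value, u16 w_length)
{
        struct usb_request *req = cdev->req;    /* shared ep0 request */

        req->zero = value < w_length;   /* short reply: may need a ZLP */
        req->length = value;
        return usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
}
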
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/gadget/u_ether.c
++++ kernel-maemo-2.6.28.test/drivers/usb/gadget/u_ether.c
+@@ -174,12 +174,6 @@
+       strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
+ }
+-static u32 eth_get_link(struct net_device *net)
+-{
+-      struct eth_dev  *dev = netdev_priv(net);
+-      return dev->gadget->speed != USB_SPEED_UNKNOWN;
+-}
+-
+ /* REVISIT can also support:
+  *   - WOL (by tracking suspends and issuing remote wakeup)
+  *   - msglevel (implies updated messaging)
+@@ -188,7 +182,7 @@
+ static struct ethtool_ops ops = {
+       .get_drvinfo = eth_get_drvinfo,
+-      .get_link = eth_get_link
++      .get_link = ethtool_op_get_link,
+ };
+ static void defer_kevent(struct eth_dev *dev, int flag)
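
The u_ether.c hunk above drops the open-coded get_link in favour of the stock ethtool_op_get_link(), which simply reports netif_carrier_ok(). A driver relying on it is expected to keep the carrier flag current; whether u_ether does that elsewhere is outside this hunk, so the fragment below is only an illustrative sketch with hypothetical hook names:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static struct ethtool_ops example_ethtool_ops = {
        .get_link = ethtool_op_get_link,        /* returns netif_carrier_ok() */
};

/* The generic get_link only reflects what these hooks have last set. */
static void example_link_up(struct net_device *net)
{
        netif_carrier_on(net);
}

static void example_link_down(struct net_device *net)
{
        netif_carrier_off(net);
}
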
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/host/ehci-hcd.c
++++ kernel-maemo-2.6.28.test/drivers/usb/host/ehci-hcd.c
+@@ -485,6 +485,7 @@
+        * periodic_size can shrink by USBCMD update if hcc_params allows.
+        */
+       ehci->periodic_size = DEFAULT_I_TDPS;
++      INIT_LIST_HEAD(&ehci->cached_itd_list);
+       if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
+               return retval;
+@@ -497,6 +498,7 @@
+       ehci->reclaim = NULL;
+       ehci->next_uframe = -1;
++      ehci->clock_frame = -1;
+       /*
+        * dedicate a qh for the async ring head, since we couldn't unlink
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/host/ehci-mem.c
++++ kernel-maemo-2.6.28.test/drivers/usb/host/ehci-mem.c
+@@ -128,6 +128,7 @@
+ static void ehci_mem_cleanup (struct ehci_hcd *ehci)
+ {
++      free_cached_itd_list(ehci);
+       if (ehci->async)
+               qh_put (ehci->async);
+       ehci->async = NULL;
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/host/ehci-q.c
++++ kernel-maemo-2.6.28.test/drivers/usb/host/ehci-q.c
+@@ -333,12 +333,40 @@
+               token = hc32_to_cpu(ehci, qtd->hw_token);
+               /* always clean up qtds the hc de-activated */
++ retry_xacterr:
+               if ((token & QTD_STS_ACTIVE) == 0) {
+                       /* on STALL, error, and short reads this urb must
+                        * complete and all its qtds must be recycled.
+                        */
+                       if ((token & QTD_STS_HALT) != 0) {
++
++                              /* retry transaction errors until we
++                               * reach the software xacterr limit
++                               */
++                              if ((token & QTD_STS_XACT) &&
++                                              QTD_CERR(token) == 0 &&
++                                              --qh->xacterrs > 0 &&
++                                              !urb->unlinked) {
++                                      ehci_dbg(ehci,
++      "detected XactErr len %d/%d retry %d\n",
++      qtd->length - QTD_LENGTH(token), qtd->length,
++      QH_XACTERR_MAX - qh->xacterrs);
++
++                                      /* reset the token in the qtd and the
++                                       * qh overlay (which still contains
++                                       * the qtd) so that we pick up from
++                                       * where we left off
++                                       */
++                                      token &= ~QTD_STS_HALT;
++                                      token |= QTD_STS_ACTIVE |
++                                                      (EHCI_TUNE_CERR << 10);
++                                      qtd->hw_token = cpu_to_hc32(ehci,
++                                                      token);
++                                      wmb();
++                                      qh->hw_token = cpu_to_hc32(ehci, token);
++                                      goto retry_xacterr;
++                              }
+                               stopped = 1;
+                       /* magic dummy for some short reads; qh won't advance.
+@@ -421,6 +449,9 @@
+               /* remove qtd; it's recycled after possible urb completion */
+               list_del (&qtd->qtd_list);
+               last = qtd;
++
++              /* reinit the xacterr counter for the next qtd */
++              qh->xacterrs = QH_XACTERR_MAX;
+       }
+       /* last urb's completion might still need calling */
+@@ -862,6 +893,7 @@
+       head->qh_next.qh = qh;
+       head->hw_next = dma;
++      qh->xacterrs = QH_XACTERR_MAX;
+       qh->qh_state = QH_STATE_LINKED;
+       /* qtd completions reported later by interrupt */
+ }
+@@ -1095,7 +1127,8 @@
+       prev->qh_next = qh->qh_next;
+       wmb ();
+-      if (unlikely (ehci_to_hcd(ehci)->state == HC_STATE_HALT)) {
++      /* If the controller isn't running, we don't have to wait for it */
++      if (unlikely(!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))) {
+               /* if (unlikely (qh->reclaim != 0))
+                *      this will recurse, probably not much
+                */
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/host/ehci-sched.c
++++ kernel-maemo-2.6.28.test/drivers/usb/host/ehci-sched.c
+@@ -1004,7 +1004,8 @@
+               is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
+               stream->bEndpointAddress &= 0x0f;
+-              stream->ep->hcpriv = NULL;
++              if (stream->ep)
++                      stream->ep->hcpriv = NULL;
+               if (stream->rescheduled) {
+                       ehci_info (ehci, "ep%d%s-iso rescheduled "
+@@ -1535,7 +1536,7 @@
+                                       struct ehci_itd, itd_list);
+                       list_move_tail (&itd->itd_list, &stream->td_list);
+                       itd->stream = iso_stream_get (stream);
+-                      itd->urb = usb_get_urb (urb);
++                      itd->urb = urb;
+                       itd_init (ehci, stream, itd);
+               }
+@@ -1644,7 +1645,7 @@
+       (void) disable_periodic(ehci);
+       ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
+-      if (unlikely (list_empty (&stream->td_list))) {
++      if (unlikely(list_is_singular(&stream->td_list))) {
+               ehci_to_hcd(ehci)->self.bandwidth_allocated
+                               -= stream->bandwidth;
+               ehci_vdbg (ehci,
+@@ -1653,14 +1654,27 @@
+                       (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
+       }
+       iso_stream_put (ehci, stream);
+-      /* OK to recycle this ITD now that its completion callback ran. */
++
+ done:
+-      usb_put_urb(urb);
+       itd->urb = NULL;
+-      itd->stream = NULL;
+-      list_move(&itd->itd_list, &stream->free_list);
+-      iso_stream_put(ehci, stream);
+-
++      if (ehci->clock_frame != itd->frame || itd->index[7] != -1) {
++              /* OK to recycle this ITD now. */
++              itd->stream = NULL;
++              list_move(&itd->itd_list, &stream->free_list);
++              iso_stream_put(ehci, stream);
++      } else {
++              /* HW might remember this ITD, so we can't recycle it yet.
++               * Move it to a safe place until a new frame starts.
++               */
++              list_move(&itd->itd_list, &ehci->cached_itd_list);
++              if (stream->refcount == 2) {
++                      /* If iso_stream_put() were called here, stream
++                       * would be freed.  Instead, just prevent reuse.
++                       */
++                      stream->ep->hcpriv = NULL;
++                      stream->ep = NULL;
++              }
++      }
+       return retval;
+ }
+@@ -1934,7 +1948,7 @@
+                               struct ehci_sitd, sitd_list);
+               list_move_tail (&sitd->sitd_list, &stream->td_list);
+               sitd->stream = iso_stream_get (stream);
+-              sitd->urb = usb_get_urb (urb);
++              sitd->urb = urb;
+               sitd_patch(ehci, stream, sitd, sched, packet);
+               sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size,
+@@ -2019,7 +2033,7 @@
+       (void) disable_periodic(ehci);
+       ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
+-      if (list_empty (&stream->td_list)) {
++      if (list_is_singular(&stream->td_list)) {
+               ehci_to_hcd(ehci)->self.bandwidth_allocated
+                               -= stream->bandwidth;
+               ehci_vdbg (ehci,
+@@ -2030,7 +2044,6 @@
+       iso_stream_put (ehci, stream);
+       /* OK to recycle this SITD now that its completion callback ran. */
+ done:
+-      usb_put_urb(urb);
+       sitd->urb = NULL;
+       sitd->stream = NULL;
+       list_move(&sitd->sitd_list, &stream->free_list);
+@@ -2101,6 +2114,20 @@
+ /*-------------------------------------------------------------------------*/
++static void free_cached_itd_list(struct ehci_hcd *ehci)
++{
++      struct ehci_itd *itd, *n;
++
++      list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
++              struct ehci_iso_stream  *stream = itd->stream;
++              itd->stream = NULL;
++              list_move(&itd->itd_list, &stream->free_list);
++              iso_stream_put(ehci, stream);
++      }
++}
++
++/*-------------------------------------------------------------------------*/
++
+ static void
+ scan_periodic (struct ehci_hcd *ehci)
+ {
+@@ -2115,10 +2142,17 @@
+        * Touches as few pages as possible:  cache-friendly.
+        */
+       now_uframe = ehci->next_uframe;
+-      if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
++      if (HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
+               clock = ehci_readl(ehci, &ehci->regs->frame_index);
+-      else
++              clock_frame = (clock >> 3) % ehci->periodic_size;
++      } else  {
+               clock = now_uframe + mod - 1;
++              clock_frame = -1;
++      }
++      if (ehci->clock_frame != clock_frame) {
++              free_cached_itd_list(ehci);
++              ehci->clock_frame = clock_frame;
++      }
+       clock %= mod;
+       clock_frame = clock >> 3;
+@@ -2277,6 +2311,10 @@
+                       /* rescan the rest of this frame, then ... */
+                       clock = now;
+                       clock_frame = clock >> 3;
++                      if (ehci->clock_frame != clock_frame) {
++                              free_cached_itd_list(ehci);
++                              ehci->clock_frame = clock_frame;
++                      }
+               } else {
+                       now_uframe++;
+                       now_uframe %= mod;
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/host/ehci.h
++++ kernel-maemo-2.6.28.test/drivers/usb/host/ehci.h
+@@ -87,6 +87,10 @@
+       int                     next_uframe;    /* scan periodic, start here */
+       unsigned                periodic_sched; /* periodic activity count */
++      /* list of itds completed while clock_frame was still active */
++      struct list_head        cached_itd_list;
++      unsigned                clock_frame;
++
+       /* per root hub port */
+       unsigned long           reset_done [EHCI_MAX_ROOT_PORTS];
+@@ -210,6 +214,8 @@
+       }
+ }
++static void free_cached_itd_list(struct ehci_hcd *ehci);
++
+ /*-------------------------------------------------------------------------*/
+ #include <linux/usb/ehci_def.h>
+@@ -360,6 +366,9 @@
+ #define       QH_STATE_UNLINK_WAIT    4               /* LINKED and on reclaim q */
+ #define       QH_STATE_COMPLETING     5               /* don't touch token.HALT */
++      u8                      xacterrs;       /* XactErr retry counter */
++#define       QH_XACTERR_MAX          32              /* XactErr retry limit */
++
+       /* periodic schedule info */
+       u8                      usecs;          /* intr bandwidth */
+       u8                      gap_uf;         /* uframes split/csplit gap */
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/host/isp1760-if.c
++++ kernel-maemo-2.6.28.test/drivers/usb/host/isp1760-if.c
+@@ -129,23 +129,23 @@
+ #endif
+ #ifdef CONFIG_PCI
+-static u32 nxp_pci_io_base;
+-static u32 iolength;
+-static u32 pci_mem_phy0;
+-static u32 length;
+-static u8 __iomem *chip_addr;
+-static u8 __iomem *iobase;
+-
+ static int __devinit isp1761_pci_probe(struct pci_dev *dev,
+               const struct pci_device_id *id)
+ {
+       u8 latency, limit;
+       __u32 reg_data;
+       int retry_count;
+-      int length;
+-      int status = 1;
+       struct usb_hcd *hcd;
+       unsigned int devflags = 0;
++      int ret_status = 0;
++
++      resource_size_t pci_mem_phy0;
++      resource_size_t memlength;
++
++      u8 __iomem *chip_addr;
++      u8 __iomem *iobase;
++      resource_size_t nxp_pci_io_base;
++      resource_size_t iolength;
+       if (usb_disabled())
+               return -ENODEV;
+@@ -168,26 +168,30 @@
+       iobase = ioremap_nocache(nxp_pci_io_base, iolength);
+       if (!iobase) {
+               printk(KERN_ERR "ioremap #1\n");
+-              release_mem_region(nxp_pci_io_base, iolength);
+-              return -ENOMEM;
++              ret_status = -ENOMEM;
++              goto cleanup1;
+       }
+       /* Grab the PLX PCI shared memory of the ISP 1761 we need  */
+       pci_mem_phy0 = pci_resource_start(dev, 3);
+-      length = pci_resource_len(dev, 3);
+-
+-      if (length < 0xffff) {
+-              printk(KERN_ERR "memory length for this resource is less than "
+-                              "required\n");
+-              release_mem_region(nxp_pci_io_base, iolength);
+-              iounmap(iobase);
+-              return  -ENOMEM;
++      memlength = pci_resource_len(dev, 3);
++      if (memlength < 0xffff) {
++              printk(KERN_ERR "memory length for this resource is wrong\n");
++              ret_status = -ENOMEM;
++              goto cleanup2;
+       }
+-      if (!request_mem_region(pci_mem_phy0, length, "ISP-PCI")) {
++      if (!request_mem_region(pci_mem_phy0, memlength, "ISP-PCI")) {
+               printk(KERN_ERR "host controller already in use\n");
+-              release_mem_region(nxp_pci_io_base, iolength);
+-              iounmap(iobase);
+-              return -EBUSY;
++              ret_status = -EBUSY;
++              goto cleanup2;
++      }
++
++      /* map available memory */
++      chip_addr = ioremap_nocache(pci_mem_phy0,memlength);
++      if (!chip_addr) {
++              printk(KERN_ERR "Error ioremap failed\n");
++              ret_status = -ENOMEM;
++              goto cleanup3;
+       }
+       /* bad pci latencies can contribute to overruns */
+@@ -210,39 +214,54 @@
+                * */
+               writel(0xface, chip_addr + HC_SCRATCH_REG);
+               udelay(100);
+-              reg_data = readl(chip_addr + HC_SCRATCH_REG);
++              reg_data = readl(chip_addr + HC_SCRATCH_REG) & 0x0000ffff;
+               retry_count--;
+       }
++      iounmap(chip_addr);
++
+       /* Host Controller presence is detected by writing to scratch register
+        * and reading back and checking the contents are same or not
+        */
+       if (reg_data != 0xFACE) {
+               dev_err(&dev->dev, "scratch register mismatch %x\n", reg_data);
+-              goto clean;
++              ret_status = -ENOMEM;
++              goto cleanup3;
+       }
+       pci_set_master(dev);
+-      status = readl(iobase + 0x68);
+-      status |= 0x900;
+-      writel(status, iobase + 0x68);
++      /* configure PLX PCI chip to pass interrupts */
++#define PLX_INT_CSR_REG 0x68
++      reg_data = readl(iobase + PLX_INT_CSR_REG);
++      reg_data |= 0x900;
++      writel(reg_data, iobase + PLX_INT_CSR_REG);
+       dev->dev.dma_mask = NULL;
+-      hcd = isp1760_register(pci_mem_phy0, length, dev->irq,
++      hcd = isp1760_register(pci_mem_phy0, memlength, dev->irq,
+               IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev_name(&dev->dev),
+               devflags);
+-      if (!IS_ERR(hcd)) {
+-              pci_set_drvdata(dev, hcd);
+-              return 0;
++      if (IS_ERR(hcd)) {
++              ret_status = -ENODEV;
++              goto cleanup3;
+       }
+-clean:
+-      status = -ENODEV;
++
++      /* done with PLX IO access */
+       iounmap(iobase);
+-      release_mem_region(pci_mem_phy0, length);
+       release_mem_region(nxp_pci_io_base, iolength);
+-      return status;
++
++      pci_set_drvdata(dev, hcd);
++      return 0;
++
++cleanup3:
++      release_mem_region(pci_mem_phy0, memlength);
++cleanup2:
++      iounmap(iobase);
++cleanup1:
++      release_mem_region(nxp_pci_io_base, iolength);
++      return ret_status;
+ }
++
+ static void isp1761_pci_remove(struct pci_dev *dev)
+ {
+       struct usb_hcd *hcd;
+@@ -255,12 +274,6 @@
+       usb_put_hcd(hcd);
+       pci_disable_device(dev);
+-
+-      iounmap(iobase);
+-      iounmap(chip_addr);
+-
+-      release_mem_region(nxp_pci_io_base, iolength);
+-      release_mem_region(pci_mem_phy0, length);
+ }
+ static void isp1761_pci_shutdown(struct pci_dev *dev)
+@@ -268,12 +281,16 @@
+       printk(KERN_ERR "ips1761_pci_shutdown\n");
+ }
+-static const struct pci_device_id isp1760_plx [] = { {
+-      /* handle any USB 2.0 EHCI controller */
+-      PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_OTHER << 8) | (0x06 << 16)), ~0),
+-              .driver_data = 0,
+-},
+-{ /* end: all zeroes */ }
++static const struct pci_device_id isp1760_plx [] = {
++      {
++              .class          = PCI_CLASS_BRIDGE_OTHER << 8,
++              .class_mask     = ~0,
++              .vendor         = PCI_VENDOR_ID_PLX,
++              .device         = 0x5406,
++              .subvendor      = PCI_VENDOR_ID_PLX,
++              .subdevice      = 0x9054,
++      },
++      { }
+ };
+ MODULE_DEVICE_TABLE(pci, isp1760_plx);
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/misc/emi26.c
++++ kernel-maemo-2.6.28.test/drivers/usb/misc/emi26.c
+@@ -160,7 +160,7 @@
+                       err("%s - error loading firmware: error = %d", __func__, err);
+                       goto wraperr;
+               }
+-      } while (i > 0);
++      } while (rec);
+       /* Assert reset (stop the CPU in the EMI) */
+       err = emi26_set_reset(dev,1);
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/mon/mon_bin.c
++++ kernel-maemo-2.6.28.test/drivers/usb/mon/mon_bin.c
+@@ -37,6 +37,7 @@
+ #define MON_IOCX_GET   _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
+ #define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
+ #define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
++
+ #ifdef CONFIG_COMPAT
+ #define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
+ #define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
+@@ -921,21 +922,6 @@
+               }
+               break;
+-#ifdef CONFIG_COMPAT
+-      case MON_IOCX_GET32: {
+-              struct mon_bin_get32 getb;
+-
+-              if (copy_from_user(&getb, (void __user *)arg,
+-                                          sizeof(struct mon_bin_get32)))
+-                      return -EFAULT;
+-
+-              ret = mon_bin_get_event(file, rp,
+-                  compat_ptr(getb.hdr32), compat_ptr(getb.data32),
+-                  getb.alloc32);
+-              }
+-              break;
+-#endif
+-
+       case MON_IOCX_MFETCH:
+               {
+               struct mon_bin_mfetch mfetch;
+@@ -962,7 +948,57 @@
+               }
+               break;
++      case MON_IOCG_STATS: {
++              struct mon_bin_stats __user *sp;
++              unsigned int nevents;
++              unsigned int ndropped;
++
++              spin_lock_irqsave(&rp->b_lock, flags);
++              ndropped = rp->cnt_lost;
++              rp->cnt_lost = 0;
++              spin_unlock_irqrestore(&rp->b_lock, flags);
++              nevents = mon_bin_queued(rp);
++
++              sp = (struct mon_bin_stats __user *)arg;
++              if (put_user(rp->cnt_lost, &sp->dropped))
++                      return -EFAULT;
++              if (put_user(nevents, &sp->queued))
++                      return -EFAULT;
++
++              }
++              break;
++
++      default:
++              return -ENOTTY;
++      }
++
++      return ret;
++}
++
+ #ifdef CONFIG_COMPAT
++static long mon_bin_compat_ioctl(struct file *file,
++    unsigned int cmd, unsigned long arg)
++{
++      struct mon_reader_bin *rp = file->private_data;
++      int ret;
++
++      switch (cmd) {
++
++      case MON_IOCX_GET32: {
++              struct mon_bin_get32 getb;
++
++              if (copy_from_user(&getb, (void __user *)arg,
++                                          sizeof(struct mon_bin_get32)))
++                      return -EFAULT;
++
++              ret = mon_bin_get_event(file, rp,
++                  compat_ptr(getb.hdr32), compat_ptr(getb.data32),
++                  getb.alloc32);
++              if (ret < 0)
++                      return ret;
++              }
++              return 0;
++
+       case MON_IOCX_MFETCH32:
+               {
+               struct mon_bin_mfetch32 mfetch;
+@@ -986,37 +1022,25 @@
+                       return ret;
+               if (put_user(ret, &uptr->nfetch32))
+                       return -EFAULT;
+-              ret = 0;
+               }
+-              break;
+-#endif
+-
+-      case MON_IOCG_STATS: {
+-              struct mon_bin_stats __user *sp;
+-              unsigned int nevents;
+-              unsigned int ndropped;
+-
+-              spin_lock_irqsave(&rp->b_lock, flags);
+-              ndropped = rp->cnt_lost;
+-              rp->cnt_lost = 0;
+-              spin_unlock_irqrestore(&rp->b_lock, flags);
+-              nevents = mon_bin_queued(rp);
++              return 0;
+-              sp = (struct mon_bin_stats __user *)arg;
+-              if (put_user(rp->cnt_lost, &sp->dropped))
+-                      return -EFAULT;
+-              if (put_user(nevents, &sp->queued))
+-                      return -EFAULT;
++      case MON_IOCG_STATS:
++              return mon_bin_ioctl(NULL, file, cmd,
++                                          (unsigned long) compat_ptr(arg));
+-              }
+-              break;
++      case MON_IOCQ_URB_LEN:
++      case MON_IOCQ_RING_SIZE:
++      case MON_IOCT_RING_SIZE:
++      case MON_IOCH_MFLUSH:
++              return mon_bin_ioctl(NULL, file, cmd, arg);
+       default:
+-              return -ENOTTY;
++              ;
+       }
+-
+-      return ret;
++      return -ENOTTY;
+ }
++#endif /* CONFIG_COMPAT */
+ static unsigned int
+ mon_bin_poll(struct file *file, struct poll_table_struct *wait)
+@@ -1094,6 +1118,9 @@
+       /* .write =     mon_text_write, */
+       .poll =         mon_bin_poll,
+       .ioctl =        mon_bin_ioctl,
++#ifdef CONFIG_COMPAT
++      .compat_ioctl = mon_bin_compat_ioctl,
++#endif
+       .release =      mon_bin_release,
+       .mmap =         mon_bin_mmap,
+ };
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/serial/cp2101.c
++++ kernel-maemo-2.6.28.test/drivers/usb/serial/cp2101.c
+@@ -79,6 +79,7 @@
+       { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
+       { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
+       { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
++      { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
+       { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
+       { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
+       { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/serial/ftdi_sio.c
++++ kernel-maemo-2.6.28.test/drivers/usb/serial/ftdi_sio.c
+@@ -660,6 +660,12 @@
+       { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
++      { USB_DEVICE(ATMEL_VID, STK541_PID) },
++      { USB_DEVICE(DE_VID, STB_PID) },
++      { USB_DEVICE(DE_VID, WHT_PID) },
++      { USB_DEVICE(ADI_VID, ADI_GNICE_PID),
++              .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++      { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
+       { },                                    /* Optional parameter entry */
+       { }                                     /* Terminating entry */
+ };
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/serial/ftdi_sio.h
++++ kernel-maemo-2.6.28.test/drivers/usb/serial/ftdi_sio.h
+@@ -881,6 +881,33 @@
+ #define RATOC_PRODUCT_ID_USB60F       0xb020
+ /*
++ * Atmel STK541
++ */
++#define ATMEL_VID             0x03eb /* Vendor ID */
++#define STK541_PID            0x2109 /* Zigbee Controller */
++
++/*
++ * Dresden Elektronic Sensor Terminal Board
++ */
++#define DE_VID                        0x1cf1 /* Vendor ID */
++#define STB_PID                       0x0001 /* Sensor Terminal Board */
++#define WHT_PID                       0x0004 /* Wireless Handheld Terminal */
++
++/*
++ * Blackfin gnICE JTAG
++ * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
++ */
++#define ADI_VID               0x0456
++#define ADI_GNICE_PID                 0xF000
++
++/*
++ * JETI SPECTROMETER SPECBOS 1201
++ * http://www.jeti.com/products/sys/scb/scb1201.php
++ */
++#define JETI_VID              0x0c6c
++#define JETI_SPC1201_PID      0x04b2
++
++/*
+  *   BmRequestType:  1100 0000b
+  *   bRequest:       FTDI_E2_READ
+  *   wValue:         0
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/serial/option.c
++++ kernel-maemo-2.6.28.test/drivers/usb/serial/option.c
+@@ -89,6 +89,7 @@
+ #define OPTION_PRODUCT_ETNA_MODEM_GT          0x7041
+ #define OPTION_PRODUCT_ETNA_MODEM_EX          0x7061
+ #define OPTION_PRODUCT_ETNA_KOI_MODEM         0x7100
++#define OPTION_PRODUCT_GTM380_MODEM           0x7201
+ #define HUAWEI_VENDOR_ID                      0x12D1
+ #define HUAWEI_PRODUCT_E600                   0x1001
+@@ -190,16 +191,18 @@
+ /* OVATION PRODUCTS */
+ #define NOVATELWIRELESS_PRODUCT_MC727         0x4100
+ #define NOVATELWIRELESS_PRODUCT_MC950D                0x4400
++#define NOVATELWIRELESS_PRODUCT_U727          0x5010
+ /* FUTURE NOVATEL PRODUCTS */
+-#define NOVATELWIRELESS_PRODUCT_EVDO_1                0x6000
+-#define NOVATELWIRELESS_PRODUCT_HSPA_1                0x7000
+-#define NOVATELWIRELESS_PRODUCT_EMBEDDED_1    0x8000
+-#define NOVATELWIRELESS_PRODUCT_GLOBAL_1      0x9000
+-#define NOVATELWIRELESS_PRODUCT_EVDO_2                0x6001
+-#define NOVATELWIRELESS_PRODUCT_HSPA_2                0x7001
+-#define NOVATELWIRELESS_PRODUCT_EMBEDDED_2    0x8001
+-#define NOVATELWIRELESS_PRODUCT_GLOBAL_2      0x9001
++#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED        0X6000
++#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED        0X6001
++#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED        0X7000
++#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED        0X7001
++#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED       0X8000
++#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED       0X8001
++#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED       0X9000
++#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED       0X9001
++#define NOVATELWIRELESS_PRODUCT_GLOBAL                0XA001
+ /* AMOI PRODUCTS */
+ #define AMOI_VENDOR_ID                                0x1614
+@@ -209,6 +212,27 @@
+ #define DELL_VENDOR_ID                                0x413C
++/* Dell modems */
++#define DELL_PRODUCT_5700_MINICARD            0x8114
++#define DELL_PRODUCT_5500_MINICARD            0x8115
++#define DELL_PRODUCT_5505_MINICARD            0x8116
++#define DELL_PRODUCT_5700_EXPRESSCARD         0x8117
++#define DELL_PRODUCT_5510_EXPRESSCARD         0x8118
++
++#define DELL_PRODUCT_5700_MINICARD_SPRINT     0x8128
++#define DELL_PRODUCT_5700_MINICARD_TELUS      0x8129
++
++#define DELL_PRODUCT_5720_MINICARD_VZW                0x8133
++#define DELL_PRODUCT_5720_MINICARD_SPRINT     0x8134
++#define DELL_PRODUCT_5720_MINICARD_TELUS      0x8135
++#define DELL_PRODUCT_5520_MINICARD_CINGULAR   0x8136
++#define DELL_PRODUCT_5520_MINICARD_GENERIC_L  0x8137
++#define DELL_PRODUCT_5520_MINICARD_GENERIC_I  0x8138
++
++#define DELL_PRODUCT_5730_MINICARD_SPRINT     0x8180
++#define DELL_PRODUCT_5730_MINICARD_TELUS      0x8181
++#define DELL_PRODUCT_5730_MINICARD_VZW                0x8182
++
+ #define KYOCERA_VENDOR_ID                     0x0c88
+ #define KYOCERA_PRODUCT_KPC650                        0x17da
+ #define KYOCERA_PRODUCT_KPC680                        0x180a
+@@ -259,19 +283,13 @@
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID                         0x19d2
++#define ZTE_PRODUCT_MF622                     0x0001
+ #define ZTE_PRODUCT_MF628                     0x0015
+ #define ZTE_PRODUCT_MF626                     0x0031
+ #define ZTE_PRODUCT_CDMA_TECH                 0xfffe
+-/* Ericsson products */
+-#define ERICSSON_VENDOR_ID                    0x0bdb
+-#define ERICSSON_PRODUCT_F3507G                       0x1900
+-
+-/* Pantech products */
+-#define PANTECH_VENDOR_ID                     0x106c
+-#define PANTECH_PRODUCT_PC5740                        0x3701
+-#define PANTECH_PRODUCT_PC5750                        0x3702  /* PX-500 */
+-#define PANTECH_PRODUCT_UM150                 0x3711
++#define BENQ_VENDOR_ID                                0x04a5
++#define BENQ_PRODUCT_H10                      0x4068
+ static struct usb_device_id option_ids[] = {
+       { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+@@ -298,6 +316,7 @@
+       { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_GT) },
+       { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_EX) },
+       { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_MODEM) },
++      { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GTM380_MODEM) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
+@@ -383,31 +402,37 @@
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
+-      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_1) }, /* Novatel EVDO product */
+-      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_1) }, /* Novatel HSPA product */
+-      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_1) }, /* Novatel Embedded product */
+-      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL_1) }, /* Novatel Global product */
+-      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_2) }, /* Novatel EVDO product */
+-      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_2) }, /* Novatel HSPA product */
+-      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_2) }, /* Novatel Embedded product */
+-      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL_2) }, /* Novatel Global product */
++      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
++      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, /* Novatel EVDO product */
++      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
++      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
++      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
++      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */
++      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */
++      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */
++      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */
++      { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */
+       { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
+       { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
+       { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) },
+-      { USB_DEVICE(DELL_VENDOR_ID, 0x8114) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
+-      { USB_DEVICE(DELL_VENDOR_ID, 0x8115) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
+-      { USB_DEVICE(DELL_VENDOR_ID, 0x8116) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
+-      { USB_DEVICE(DELL_VENDOR_ID, 0x8117) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */
+-      { USB_DEVICE(DELL_VENDOR_ID, 0x8118) }, /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */
+-      { USB_DEVICE(DELL_VENDOR_ID, 0x8128) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */
+-      { USB_DEVICE(DELL_VENDOR_ID, 0x8129) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite ET620 CDMA/EV-DO */
+-      { USB_DEVICE(DELL_VENDOR_ID, 0x8133) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
+-      { USB_DEVICE(DELL_VENDOR_ID, 0x8136) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */
+-      { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */
+-      { USB_DEVICE(DELL_VENDOR_ID, 0x8138) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */
+-      { USB_DEVICE(DELL_VENDOR_ID, 0x8147) }, /* Dell Wireless 5530 Mobile Broadband (3G HSPA) Mini-Card */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) },             /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) },             /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5505_MINICARD) },             /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_EXPRESSCARD) },          /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5510_EXPRESSCARD) },          /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_SPRINT) },      /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_TELUS) },       /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite ET620 CDMA/EV-DO */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_VZW) },         /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_SPRINT) },      /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_TELUS) },       /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_CINGULAR) },    /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_L) },   /* Dell Wireless HSDPA 5520 */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_I) },   /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) },      /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) },       /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
++      { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) },         /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
+       { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },   /* ADU-E100, ADU-310 */
+       { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
+       { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
+@@ -472,13 +497,12 @@
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+       { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
++      { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622) },
+       { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) },
+       { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
+       { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
+-      { USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G) },
+-      { USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_PC5740) },
+-      { USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_PC5750) },
+-      { USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_UM150) },
++      { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
++      { USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */
+       { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/serial/ti_usb_3410_5052.c
++++ kernel-maemo-2.6.28.test/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -176,25 +176,32 @@
+ /* the array dimension is the number of default entries plus */
+ /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
+ /* null entry */
+-static struct usb_device_id ti_id_table_3410[1+TI_EXTRA_VID_PID_COUNT+1] = {
++static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = {
+       { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
+       { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
++      { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
++      { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
++      { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
+ };
+-static struct usb_device_id ti_id_table_5052[4+TI_EXTRA_VID_PID_COUNT+1] = {
++static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
+       { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
+       { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
+       { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
+       { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
++      { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
+ };
+-static struct usb_device_id ti_id_table_combined[] = {
++static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] = {
+       { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
+       { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
+       { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
+       { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
+       { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
+       { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
++      { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
++      { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
++      { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
+       { }
+ };
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/serial/ti_usb_3410_5052.h
++++ kernel-maemo-2.6.28.test/drivers/usb/serial/ti_usb_3410_5052.h
+@@ -27,7 +27,11 @@
+ /* Vendor and product ids */
+ #define TI_VENDOR_ID                  0x0451
++#define IBM_VENDOR_ID                 0x04b3
+ #define TI_3410_PRODUCT_ID            0x3410
++#define IBM_4543_PRODUCT_ID           0x4543
++#define IBM_454B_PRODUCT_ID           0x454b
++#define IBM_454C_PRODUCT_ID           0x454c
+ #define TI_3410_EZ430_ID              0xF430  /* TI ez430 development tool */
+ #define TI_5052_BOOT_PRODUCT_ID               0x5052  /* no EEPROM, no firmware */
+ #define TI_5152_BOOT_PRODUCT_ID               0x5152  /* no EEPROM, no firmware */
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/storage/cypress_atacb.c
++++ kernel-maemo-2.6.28.test/drivers/usb/storage/cypress_atacb.c
+@@ -133,19 +133,18 @@
+               /* build the command for
+                * reading the ATA registers */
+-              scsi_eh_prep_cmnd(srb, &ses, NULL, 0, 0);
+-              srb->sdb.length = sizeof(regs);
+-              sg_init_one(&ses.sense_sgl, regs, srb->sdb.length);
+-              srb->sdb.table.sgl = &ses.sense_sgl;
+-              srb->sc_data_direction = DMA_FROM_DEVICE;
+-              srb->sdb.table.nents = 1;
++              scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sizeof(regs));
++
+               /* we use the same command as before, but we set
+                * the read taskfile bit, for not executing atacb command,
+                * but reading register selected in srb->cmnd[4]
+                */
++              srb->cmd_len = 16;
++              srb->cmnd = ses.cmnd;
+               srb->cmnd[2] = 1;
+               usb_stor_transparent_scsi_command(srb, us);
++              memcpy(regs, srb->sense_buffer, sizeof(regs));
+               tmp_result = srb->result;
+               scsi_eh_restore_cmnd(srb, &ses);
+               /* we fail to get registers, report invalid command */
+@@ -162,8 +161,8 @@
+               /* XXX we should generate sk, asc, ascq from status and error
+                * regs
+-               * (see 11.1 Error translation - ATA device error to SCSI error map)
+-               * and ata_to_sense_error from libata.
++               * (see 11.1 Error translation ATA device error to SCSI error
++               *  map, and ata_to_sense_error from libata.)
+                */
+               /* Sense data is current and format is descriptor. */
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/storage/libusual.c
++++ kernel-maemo-2.6.28.test/drivers/usb/storage/libusual.c
+@@ -46,6 +46,12 @@
+ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin,bcdDeviceMax), \
+   .driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
++#define COMPLIANT_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
++                  vendorName, productName, useProtocol, useTransport, \
++                  initFunction, flags) \
++{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
++  .driver_info = (flags) }
++
+ #define USUAL_DEV(useProto, useTrans, useType) \
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
+   .driver_info = ((useType)<<24) }
+@@ -57,6 +63,7 @@
+ #undef USUAL_DEV
+ #undef UNUSUAL_DEV
++#undef COMPLIANT_DEV
+ MODULE_DEVICE_TABLE(usb, storage_usb_ids);
+ EXPORT_SYMBOL_GPL(storage_usb_ids);
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/storage/scsiglue.c
++++ kernel-maemo-2.6.28.test/drivers/usb/storage/scsiglue.c
+@@ -59,6 +59,14 @@
+ #include "transport.h"
+ #include "protocol.h"
++/* Vendor IDs for companies that seem to include the READ CAPACITY bug
++ * in all their devices
++ */
++#define VENDOR_ID_NOKIA               0x0421
++#define VENDOR_ID_NIKON               0x04b0
++#define VENDOR_ID_PENTAX      0x0a17
++#define VENDOR_ID_MOTOROLA    0x22b8
++
+ /***********************************************************************
+  * Host functions 
+  ***********************************************************************/
+@@ -127,6 +135,12 @@
+               if (sdev->request_queue->max_sectors > max_sectors)
+                       blk_queue_max_sectors(sdev->request_queue,
+                                             max_sectors);
++      } else if (sdev->type == TYPE_TAPE) {
++              /* Tapes need much higher max_sector limits, so just
++               * raise it to the maximum possible (4 GB / 512) and
++               * let the queue segment size sort out the real limit.
++               */
++              blk_queue_max_sectors(sdev->request_queue, 0x7FFFFF);
+       }
+       /* We can't put these settings in slave_alloc() because that gets
+@@ -134,6 +148,23 @@
+        * settings can't be overridden via the scsi devinfo mechanism. */
+       if (sdev->type == TYPE_DISK) {
++              /* Some vendors seem to put the READ CAPACITY bug into
++               * all their devices -- primarily makers of cell phones
++               * and digital cameras.  Since these devices always use
++               * flash media and can be expected to have an even number
++               * of sectors, we will always enable the CAPACITY_HEURISTICS
++               * flag unless told otherwise. */
++              switch (le16_to_cpu(us->pusb_dev->descriptor.idVendor)) {
++              case VENDOR_ID_NOKIA:
++              case VENDOR_ID_NIKON:
++              case VENDOR_ID_PENTAX:
++              case VENDOR_ID_MOTOROLA:
++                      if (!(us->fflags & (US_FL_FIX_CAPACITY |
++                                      US_FL_CAPACITY_OK)))
++                              us->fflags |= US_FL_CAPACITY_HEURISTICS;
++                      break;
++              }
++
+               /* Disk-type devices use MODE SENSE(6) if the protocol
+                * (SubClass) is Transparent SCSI, otherwise they use
+                * MODE SENSE(10). */
+@@ -196,6 +227,14 @@
+                * sector in a larger then 1 sector read, since the performance
+                * impact is negible we set this flag for all USB disks */
+               sdev->last_sector_bug = 1;
++
++              /* Enable last-sector hacks for single-target devices using
++               * the Bulk-only transport, unless we already know the
++               * capacity will be decremented or is correct. */
++              if (!(us->fflags & (US_FL_FIX_CAPACITY | US_FL_CAPACITY_OK |
++                                      US_FL_SCM_MULT_TARG)) &&
++                              us->protocol == US_PR_BULK)
++                      us->use_last_sector_hacks = 1;
+       } else {
+               /* Non-disk-type devices don't need to blacklist any pages
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/storage/transport.c
++++ kernel-maemo-2.6.28.test/drivers/usb/storage/transport.c
+@@ -57,6 +57,9 @@
+ #include "scsiglue.h"
+ #include "debug.h"
++#include <linux/blkdev.h>
++#include "../../scsi/sd.h"
++
+ /***********************************************************************
+  * Data transfer routines
+@@ -511,6 +514,80 @@
+  * Transport routines
+  ***********************************************************************/
++/* There are so many devices that report the capacity incorrectly,
++ * this routine was written to counteract some of the resulting
++ * problems.
++ */
++static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
++{
++      struct gendisk *disk;
++      struct scsi_disk *sdkp;
++      u32 sector;
++
++      /* To Report "Medium Error: Record Not Found */
++      static unsigned char record_not_found[18] = {
++              [0]     = 0x70,                 /* current error */
++              [2]     = MEDIUM_ERROR,         /* = 0x03 */
++              [7]     = 0x0a,                 /* additional length */
++              [12]    = 0x14                  /* Record Not Found */
++      };
++
++      /* If last-sector problems can't occur, whether because the
++       * capacity was already decremented or because the device is
++       * known to report the correct capacity, then we don't need
++       * to do anything.
++       */
++      if (!us->use_last_sector_hacks)
++              return;
++
++      /* Was this command a READ(10) or a WRITE(10)? */
++      if (srb->cmnd[0] != READ_10 && srb->cmnd[0] != WRITE_10)
++              goto done;
++
++      /* Did this command access the last sector? */
++      sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) |
++                      (srb->cmnd[4] << 8) | (srb->cmnd[5]);
++      disk = srb->request->rq_disk;
++      if (!disk)
++              goto done;
++      sdkp = scsi_disk(disk);
++      if (!sdkp)
++              goto done;
++      if (sector + 1 != sdkp->capacity)
++              goto done;
++
++      if (srb->result == SAM_STAT_GOOD && scsi_get_resid(srb) == 0) {
++
++              /* The command succeeded.  We know this device doesn't
++               * have the last-sector bug, so stop checking it.
++               */
++              us->use_last_sector_hacks = 0;
++
++      } else {
++              /* The command failed.  Allow up to 3 retries in case this
++               * is some normal sort of failure.  After that, assume the
++               * capacity is wrong and we're trying to access the sector
++               * beyond the end.  Replace the result code and sense data
++               * with values that will cause the SCSI core to fail the
++               * command immediately, instead of going into an infinite
++               * (or even just a very long) retry loop.
++               */
++              if (++us->last_sector_retries < 3)
++                      return;
++              srb->result = SAM_STAT_CHECK_CONDITION;
++              memcpy(srb->sense_buffer, record_not_found,
++                              sizeof(record_not_found));
++      }
++
++ done:
++      /* Don't reset the retry counter for TEST UNIT READY commands,
++       * because they get issued after device resets which might be
++       * caused by a failed last-sector access.
++       */
++      if (srb->cmnd[0] != TEST_UNIT_READY)
++              us->last_sector_retries = 0;
++}
++
+ /* Invoke the transport and basic error-handling/recovery methods
+  *
+  * This is used by the protocol layers to actually send the message to
+@@ -544,6 +621,7 @@
+       /* if the transport provided its own sense data, don't auto-sense */
+       if (result == USB_STOR_TRANSPORT_NO_SENSE) {
+               srb->result = SAM_STAT_CHECK_CONDITION;
++              last_sector_hacks(us, srb);
+               return;
+       }
+@@ -667,6 +745,7 @@
+                       scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
+               srb->result = (DID_ERROR << 16) | (SUGGEST_RETRY << 24);
++      last_sector_hacks(us, srb);
+       return;
+       /* Error and abort processing: try to resynchronize with the device
+@@ -694,6 +773,7 @@
+               us->transport_reset(us);
+       }
+       clear_bit(US_FLIDX_RESETTING, &us->dflags);
++      last_sector_hacks(us, srb);
+ }
+ /* Stop the current URB transfer */
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/storage/unusual_devs.h
++++ kernel-maemo-2.6.28.test/drivers/usb/storage/unusual_devs.h
+@@ -27,7 +27,8 @@
+ /* IMPORTANT NOTE: This file must be included in another file which does
+  * the following thing for it to work:
+- * The macro UNUSUAL_DEV() must be defined before this file is included
++ * The UNUSUAL_DEV, COMPLIANT_DEV, and USUAL_DEV macros must be defined
++ * before this file is included.
+  */
+ /* If you edit this file, please try to keep it sorted first by VendorID,
+@@ -46,6 +47,12 @@
+  * <usb-storage@lists.one-eyed-alien.net>
+  */
++/* Note: If you add an entry only in order to set the CAPACITY_OK flag,
++ * use the COMPLIANT_DEV macro instead of UNUSUAL_DEV.  This is
++ * because such entries mark devices which actually work correctly,
++ * as opposed to devices that do something strangely or wrongly.
++ */
++
+ /* patch submitted by Vivian Bregier <Vivian.Bregier@imag.fr>
+  */
+ UNUSUAL_DEV(  0x03eb, 0x2002, 0x0100, 0x0100,
+@@ -160,34 +167,6 @@
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+               US_FL_MAX_SECTORS_64 ),
+-/* Reported by Filip Joelsson <filip@blueturtle.nu> */
+-UNUSUAL_DEV(  0x0421, 0x005d, 0x0001, 0x0600,
+-              "Nokia",
+-              "Nokia 3110c",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY ),
+-
+-/* Reported by Ozan Sener <themgzzy@gmail.com> */
+-UNUSUAL_DEV(  0x0421, 0x0060, 0x0551, 0x0551,
+-              "Nokia",
+-              "3500c",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY ),
+-
+-/* Reported by CSECSY Laszlo <boobaa@frugalware.org> */
+-UNUSUAL_DEV(  0x0421, 0x0063, 0x0001, 0x0601,
+-              "Nokia",
+-              "Nokia 3109c",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY ),
+-
+-/* Patch for Nokia 5310 capacity */
+-UNUSUAL_DEV(  0x0421, 0x006a, 0x0000, 0x0701,
+-              "Nokia",
+-              "5310",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY ),
+-
+ /* Reported by Mario Rettig <mariorettig@web.de> */
+ UNUSUAL_DEV(  0x0421, 0x042e, 0x0100, 0x0100,
+               "Nokia",
+@@ -240,7 +219,7 @@
+               US_FL_MAX_SECTORS_64 ),
+ /* Reported by Manuel Osdoba <manuel.osdoba@tu-ilmenau.de> */
+-UNUSUAL_DEV( 0x0421, 0x0492, 0x0452, 0x0452,
++UNUSUAL_DEV( 0x0421, 0x0492, 0x0452, 0x9999,
+               "Nokia",
+               "Nokia 6233",
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+@@ -253,35 +232,6 @@
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+               US_FL_MAX_SECTORS_64 ),
+-/* Reported by Cedric Godin <cedric@belbone.be> */
+-UNUSUAL_DEV(  0x0421, 0x04b9, 0x0500, 0x0551,
+-              "Nokia",
+-              "5300",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY ),
+-
+-/* Reported by Richard Nauber <RichardNauber@web.de> */
+-UNUSUAL_DEV(  0x0421, 0x04fa, 0x0550, 0x0660,
+-              "Nokia",
+-              "6300",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY ),
+-
+-/* Patch for Nokia 5310 capacity */
+-UNUSUAL_DEV(  0x0421, 0x006a, 0x0000, 0x0591,
+-      "Nokia",
+-      "5310",
+-      US_SC_DEVICE, US_PR_DEVICE, NULL,
+-      US_FL_FIX_CAPACITY ),
+-
+-/* Submitted by Ricky Wong Yung Fei <evilbladewarrior@gmail.com> */
+-/* Nokia 7610 Supernova - Too many sectors reported in usb storage mode */
+-UNUSUAL_DEV(  0x0421, 0x00f5, 0x0000, 0x0470,
+-      "Nokia",
+-      "7610 Supernova",
+-      US_SC_DEVICE, US_PR_DEVICE, NULL,
+-      US_FL_FIX_CAPACITY ),
+-
+ /* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */
+ UNUSUAL_DEV(  0x0424, 0x0fdc, 0x0210, 0x0210,
+               "SMSC",
+@@ -396,83 +346,6 @@
+               US_SC_DEVICE, US_PR_DEVICE,NULL,
+               US_FL_NOT_LOCKABLE ),
+-/* Reported by Stefan de Konink <skinkie@xs4all.nl> */
+-UNUSUAL_DEV(  0x04b0, 0x0401, 0x0200, 0x0200,
+-              "NIKON",
+-              "NIKON DSC D100",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY),
+-
+-/* Reported by Tobias Kunze Briseno <t-linux@fictive.com> */
+-UNUSUAL_DEV(  0x04b0, 0x0403, 0x0200, 0x0200,
+-              "NIKON",
+-              "NIKON DSC D2H",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY),
+-
+-/* Reported by Milinevsky Dmitry <niam.niam@gmail.com> */
+-UNUSUAL_DEV(  0x04b0, 0x0409, 0x0100, 0x0100,
+-              "NIKON",
+-              "NIKON DSC D50",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY),
+-
+-/* Reported by Andreas Bockhold <andreas@bockionline.de> */
+-UNUSUAL_DEV(  0x04b0, 0x0405, 0x0100, 0x0100,
+-              "NIKON",
+-              "NIKON DSC D70",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY),
+-
+-/* Reported by Jamie Kitson <jamie@staberinde.fsnet.co.uk> */
+-UNUSUAL_DEV(  0x04b0, 0x040d, 0x0100, 0x0100,
+-              "NIKON",
+-              "NIKON DSC D70s",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY),
+-
+-/* Reported by Graber and Mike Pagano <mpagano-kernel@mpagano.com> */
+-UNUSUAL_DEV(  0x04b0, 0x040f, 0x0100, 0x0200,
+-              "NIKON",
+-              "NIKON DSC D200",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY),
+-
+-/* Reported by Emil Larsson <emil@swip.net> */
+-UNUSUAL_DEV(  0x04b0, 0x0411, 0x0100, 0x0111,
+-              "NIKON",
+-              "NIKON DSC D80",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY),
+-
+-/* Reported by Ortwin Glueck <odi@odi.ch> */
+-UNUSUAL_DEV(  0x04b0, 0x0413, 0x0110, 0x0111,
+-              "NIKON",
+-              "NIKON DSC D40",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY),
+-
+-/* Reported by Paul Check <paul@openstreet.com> */
+-UNUSUAL_DEV(  0x04b0, 0x0415, 0x0100, 0x0100,
+-              "NIKON",
+-              "NIKON DSC D2Xs",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY),
+-
+-/* Reported by Shan Destromp (shansan@gmail.com) */
+-UNUSUAL_DEV(  0x04b0, 0x0417, 0x0100, 0x0100,
+-              "NIKON",
+-              "NIKON DSC D40X",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY),
+-
+-/* Reported by paul ready <lxtwin@homecall.co.uk> */
+-UNUSUAL_DEV(  0x04b0, 0x0419, 0x0100, 0x0200,
+-              "NIKON",
+-              "NIKON DSC D300",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY),
+-
+ /* Reported by Doug Maxey (dwm@austin.ibm.com) */
+ UNUSUAL_DEV(  0x04b3, 0x4001, 0x0110, 0x0110,
+               "IBM",
+@@ -685,6 +558,13 @@
+               US_SC_8070, US_PR_DEVICE, NULL,
+               US_FL_FIX_INQUIRY ),
++/* Added by Alan Stern <stern@rowland.harvard.edu> */
++COMPLIANT_DEV(0x0525, 0xa4a5, 0x0000, 0x9999,
++              "Linux",
++              "File-backed Storage Gadget",
++              US_SC_DEVICE, US_PR_DEVICE, NULL,
++              US_FL_CAPACITY_OK ),
++
+ /* Yakumo Mega Image 37
+  * Submitted by Stephan Fuhrmann <atomenergie@t-online.de> */
+ UNUSUAL_DEV(  0x052b, 0x1801, 0x0100, 0x0100,
+@@ -996,13 +876,13 @@
+               "Genesys Logic",
+               "USB to IDE Optical",
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_GO_SLOW | US_FL_MAX_SECTORS_64 ),
++              US_FL_GO_SLOW | US_FL_MAX_SECTORS_64 | US_FL_IGNORE_RESIDUE ),
+ UNUSUAL_DEV(  0x05e3, 0x0702, 0x0000, 0xffff,
+               "Genesys Logic",
+               "USB to IDE Disk",
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_GO_SLOW | US_FL_MAX_SECTORS_64 ),
++              US_FL_GO_SLOW | US_FL_MAX_SECTORS_64 | US_FL_IGNORE_RESIDUE ),
+ /* Reported by Hanno Boeck <hanno@gmx.de>
+  * Taken from the Lycoris Kernel */
+@@ -1033,14 +913,16 @@
+               US_FL_FIX_CAPACITY ),
+ /* Reported by Richard -=[]=- <micro_flyer@hotmail.com> */
+-UNUSUAL_DEV( 0x067b, 0x2507, 0x0100, 0x0100,
++/* Change to bcdDeviceMin (0x0100 to 0x0001) reported by
++ * Thomas Bartosik <tbartdev@gmx-topmail.de> */
++UNUSUAL_DEV( 0x067b, 0x2507, 0x0001, 0x0100,
+               "Prolific Technology Inc.",
+               "Mass Storage Device",
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+               US_FL_FIX_CAPACITY | US_FL_GO_SLOW ),
+ /* Reported by Alex Butcher <alex.butcher@assursys.co.uk> */
+-UNUSUAL_DEV( 0x067b, 0x3507, 0x0001, 0x0001,
++UNUSUAL_DEV( 0x067b, 0x3507, 0x0001, 0x0101,
+               "Prolific Technology Inc.",
+               "ATAPI-6 Bridge Controller",
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+@@ -1282,12 +1164,14 @@
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+               US_FL_FIX_INQUIRY ),
+-/* Reported by Rauch Wolke <rauchwolke@gmx.net> */
++/* Reported by Rauch Wolke <rauchwolke@gmx.net>
++ * and augmented by binbin <binbinsh@gmail.com> (Bugzilla #12882)
++ */
+ UNUSUAL_DEV(  0x07c4, 0xa4a5, 0x0000, 0xffff,
+               "Simple Tech/Datafab",
+               "CF+SM Reader",
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_IGNORE_RESIDUE ),
++              US_FL_IGNORE_RESIDUE | US_FL_MAX_SECTORS_64 ),
+ /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
+  * to the USB storage specification in two ways:
+@@ -1320,6 +1204,13 @@
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+               US_FL_FIX_CAPACITY),
++/* Reported and patched by Nguyen Anh Quynh <aquynh@gmail.com> */
++UNUSUAL_DEV( 0x0840, 0x0084, 0x0001, 0x0001,
++              "Argosy",
++              "Storage",
++              US_SC_DEVICE, US_PR_DEVICE, NULL,
++              US_FL_FIX_CAPACITY),
++
+ /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
+  * Flag will support Bulk devices which use a standards-violating 32-byte
+  * Command Block Wrapper. Here, the "DC2MEGA" cameras (several brands) with
+@@ -1417,14 +1308,6 @@
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+               US_FL_FIX_INQUIRY ),
+-
+-/* Submitted by Per Winkvist <per.winkvist@uk.com> */
+-UNUSUAL_DEV( 0x0a17, 0x006, 0x0000, 0xffff,
+-              "Pentax",
+-              "Optio S/S4",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_INQUIRY ),
+-
+ /* These are virtual windows driver CDs, which the zd1211rw driver
+  * automatically converts into WLAN devices. */
+ UNUSUAL_DEV( 0x0ace, 0x2011, 0x0101, 0x0101,
+@@ -1449,6 +1332,16 @@
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+               0 ),
++/* Reported by Jan Dumon <j.dumon@option.com>
++ * This device (wrongly) has a vendor-specific device descriptor.
++ * The entry is needed so usb-storage can bind to it's mass-storage
++ * interface as an interface driver */
++UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
++              "Option",
++              "GI 0431 SD-Card",
++              US_SC_DEVICE, US_PR_DEVICE, NULL,
++              0 ),
++
+ #ifdef CONFIG_USB_STORAGE_ISD200
+ UNUSUAL_DEV(  0x0bf6, 0xa001, 0x0100, 0x0110,
+               "ATI",
+@@ -2076,6 +1969,12 @@
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+               US_FL_IGNORE_DEVICE),
++UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
++              "ST",
++              "2A",
++              US_SC_DEVICE, US_PR_DEVICE, NULL,
++              US_FL_FIX_CAPACITY),
++
+ /* patch submitted by Davide Perini <perini.davide@dpsoftware.org>
+  * and Renato Perini <rperini@email.it>
+  */
+@@ -2086,27 +1985,6 @@
+               US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ),
+ /*
+- * Patch by Pete Zaitcev <zaitcev@redhat.com>
+- * Report by Mark Patton. Red Hat bz#208928.
+- * Added support for rev 0x0002 (Motorola ROKR W5)
+- * by Javier Smaldone <javier@smaldone.com.ar>
+- */
+-UNUSUAL_DEV(  0x22b8, 0x4810, 0x0001, 0x0002,
+-              "Motorola",
+-              "RAZR V3i/ROKR W5",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY),
+-
+-/*
+- * Patch by Jost Diederichs <jost@qdusa.com>
+- */
+-UNUSUAL_DEV(0x22b8, 0x6410, 0x0001, 0x9999,
+-              "Motorola Inc.",
+-              "Motorola Phone (RAZRV3xx)",
+-              US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_FIX_CAPACITY),
+-
+-/*
+  * Patch by Constantin Baranov <const@tltsu.ru>
+  * Report by Andreas Koenecke.
+  * Motorola ROKR Z6.
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/storage/usb.c
++++ kernel-maemo-2.6.28.test/drivers/usb/storage/usb.c
+@@ -126,6 +126,8 @@
+ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin,bcdDeviceMax), \
+   .driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
++#define COMPLIANT_DEV UNUSUAL_DEV
++
+ #define USUAL_DEV(useProto, useTrans, useType) \
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
+   .driver_info = (USB_US_TYPE_STOR<<24) }
+@@ -134,6 +136,7 @@
+ #     include "unusual_devs.h"
+ #undef UNUSUAL_DEV
++#undef COMPLIANT_DEV
+ #undef USUAL_DEV
+       /* Terminating entry */
+       { }
+@@ -164,6 +167,8 @@
+       .initFunction = init_function,  \
+ }
++#define COMPLIANT_DEV UNUSUAL_DEV
++
+ #define USUAL_DEV(use_protocol, use_transport, use_type) \
+ { \
+       .useProtocol = use_protocol,    \
+@@ -173,6 +178,7 @@
+ static struct us_unusual_dev us_unusual_dev_list[] = {
+ #     include "unusual_devs.h" 
+ #     undef UNUSUAL_DEV
++#     undef COMPLIANT_DEV
+ #     undef USUAL_DEV
+       /* Terminating entry */
+--- kernel-maemo-2.6.28.test.orig/drivers/usb/storage/usb.h
++++ kernel-maemo-2.6.28.test/drivers/usb/storage/usb.h
+@@ -155,6 +155,10 @@
+ #ifdef CONFIG_PM
+       pm_hook                 suspend_resume_hook;
+ #endif
++
++      /* hacks for READ CAPACITY bug handling */
++      int                     use_last_sector_hacks;
++      int                     last_sector_retries;
+ };
+ /* Convert between us_data and the corresponding Scsi_Host */
+--- kernel-maemo-2.6.28.test.orig/drivers/video/aty/mach64_ct.c
++++ kernel-maemo-2.6.28.test/drivers/video/aty/mach64_ct.c
+@@ -8,6 +8,9 @@
+ #include <asm/io.h>
+ #include <video/mach64.h>
+ #include "atyfb.h"
++#ifdef CONFIG_PPC
++#include <asm/machdep.h>
++#endif
+ #undef DEBUG
+@@ -536,6 +539,14 @@
+       pll->ct.xclk_post_div_real = postdividers[xpost_div];
+       pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
++#ifdef CONFIG_PPC
++      if (machine_is(powermac)) {
++              /* Override PLL_EXT_CNTL & 0x07. */
++              pll->ct.xclk_post_div = xpost_div;
++              pll->ct.xclk_ref_div = 1;
++      }
++#endif
++
+ #ifdef DEBUG
+       pllmclk = (1000000 * pll->ct.mclk_fb_mult * pll->ct.mclk_fb_div) /
+                       (par->ref_clk_per * pll->ct.pll_ref_div);
+--- kernel-maemo-2.6.28.test.orig/drivers/w1/slaves/w1_therm.c
++++ kernel-maemo-2.6.28.test/drivers/w1/slaves/w1_therm.c
+@@ -115,7 +115,7 @@
+ static inline int w1_DS18B20_convert_temp(u8 rom[9])
+ {
+-      s16 t = (rom[1] << 8) | rom[0];
++      int t = ((s16)rom[1] << 8) | rom[0];
+       t = t*1000/16;
+       return t;
+ }
+--- kernel-maemo-2.6.28.test.orig/drivers/watchdog/Kconfig
++++ kernel-maemo-2.6.28.test/drivers/watchdog/Kconfig
+@@ -406,7 +406,7 @@
+       ---help---
+         Hardware driver for the intel TCO timer based watchdog devices.
+         These drivers are included in the Intel 82801 I/O Controller
+-        Hub family (from ICH0 up to ICH8) and in the Intel 6300ESB
++        Hub family (from ICH0 up to ICH10) and in the Intel 63xxESB
+         controller hub.
+         The TCO (Total Cost of Ownership) timer is a watchdog timer
+--- kernel-maemo-2.6.28.test.orig/drivers/watchdog/iTCO_vendor_support.c
++++ kernel-maemo-2.6.28.test/drivers/watchdog/iTCO_vendor_support.c
+@@ -1,7 +1,7 @@
+ /*
+  *    intel TCO vendor specific watchdog driver support
+  *
+- *    (c) Copyright 2006-2008 Wim Van Sebroeck <wim@iguana.be>.
++ *    (c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>.
+  *
+  *    This program is free software; you can redistribute it and/or
+  *    modify it under the terms of the GNU General Public License
+@@ -19,7 +19,7 @@
+ /* Module and version information */
+ #define DRV_NAME      "iTCO_vendor_support"
+-#define DRV_VERSION   "1.02"
++#define DRV_VERSION   "1.03"
+ #define PFX           DRV_NAME ": "
+ /* Includes */
+@@ -77,6 +77,26 @@
+  *        20.6 seconds.
+  */
++static void supermicro_old_pre_start(unsigned long acpibase)
++{
++      unsigned long val32;
++
++      /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
++      val32 = inl(SMI_EN);
++      val32 &= 0xffffdfff;    /* Turn off SMI clearing watchdog */
++      outl(val32, SMI_EN);    /* Needed to activate watchdog */
++}
++
++static void supermicro_old_pre_stop(unsigned long acpibase)
++{
++      unsigned long val32;
++
++      /* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
++      val32 = inl(SMI_EN);
++      val32 |= 0x00002000;    /* Turn on SMI clearing watchdog */
++      outl(val32, SMI_EN);    /* Needed to deactivate watchdog */
++}
++
+ static void supermicro_old_pre_keepalive(unsigned long acpibase)
+ {
+       /* Reload TCO Timer (done in iTCO_wdt_keepalive) + */
+@@ -228,14 +248,18 @@
+ void iTCO_vendor_pre_start(unsigned long acpibase,
+                          unsigned int heartbeat)
+ {
+-      if (vendorsupport == SUPERMICRO_NEW_BOARD)
++      if (vendorsupport == SUPERMICRO_OLD_BOARD)
++              supermicro_old_pre_start(acpibase);
++      else if (vendorsupport == SUPERMICRO_NEW_BOARD)
+               supermicro_new_pre_start(heartbeat);
+ }
+ EXPORT_SYMBOL(iTCO_vendor_pre_start);
+ void iTCO_vendor_pre_stop(unsigned long acpibase)
+ {
+-      if (vendorsupport == SUPERMICRO_NEW_BOARD)
++      if (vendorsupport == SUPERMICRO_OLD_BOARD)
++              supermicro_old_pre_stop(acpibase);
++      else if (vendorsupport == SUPERMICRO_NEW_BOARD)
+               supermicro_new_pre_stop();
+ }
+ EXPORT_SYMBOL(iTCO_vendor_pre_stop);
+--- kernel-maemo-2.6.28.test.orig/drivers/watchdog/iTCO_wdt.c
++++ kernel-maemo-2.6.28.test/drivers/watchdog/iTCO_wdt.c
+@@ -1,7 +1,7 @@
+ /*
+- *    intel TCO Watchdog Driver (Used in i82801 and i6300ESB chipsets)
++ *    intel TCO Watchdog Driver (Used in i82801 and i63xxESB chipsets)
+  *
+- *    (c) Copyright 2006-2008 Wim Van Sebroeck <wim@iguana.be>.
++ *    (c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>.
+  *
+  *    This program is free software; you can redistribute it and/or
+  *    modify it under the terms of the GNU General Public License
+@@ -63,7 +63,7 @@
+ /* Module and version information */
+ #define DRV_NAME      "iTCO_wdt"
+-#define DRV_VERSION   "1.04"
++#define DRV_VERSION   "1.05"
+ #define PFX           DRV_NAME ": "
+ /* Includes */
+@@ -236,16 +236,16 @@
+ /* Address definitions for the TCO */
+ /* TCO base address */
+-#define       TCOBASE         iTCO_wdt_private.ACPIBASE + 0x60
++#define TCOBASE               iTCO_wdt_private.ACPIBASE + 0x60
+ /* SMI Control and Enable Register */
+-#define       SMI_EN          iTCO_wdt_private.ACPIBASE + 0x30
++#define SMI_EN                iTCO_wdt_private.ACPIBASE + 0x30
+ #define TCO_RLD               TCOBASE + 0x00  /* TCO Timer Reload and Curr. Value */
+ #define TCOv1_TMR     TCOBASE + 0x01  /* TCOv1 Timer Initial Value    */
+-#define       TCO_DAT_IN      TCOBASE + 0x02  /* TCO Data In Register         */
+-#define       TCO_DAT_OUT     TCOBASE + 0x03  /* TCO Data Out Register        */
+-#define       TCO1_STS        TCOBASE + 0x04  /* TCO1 Status Register         */
+-#define       TCO2_STS        TCOBASE + 0x06  /* TCO2 Status Register         */
++#define TCO_DAT_IN    TCOBASE + 0x02  /* TCO Data In Register         */
++#define TCO_DAT_OUT   TCOBASE + 0x03  /* TCO Data Out Register        */
++#define TCO1_STS      TCOBASE + 0x04  /* TCO1 Status Register         */
++#define TCO2_STS      TCOBASE + 0x06  /* TCO2 Status Register         */
+ #define TCO1_CNT      TCOBASE + 0x08  /* TCO1 Control Register        */
+ #define TCO2_CNT      TCOBASE + 0x0a  /* TCO2 Control Register        */
+ #define TCOv2_TMR     TCOBASE + 0x12  /* TCOv2 Timer Initial Value    */
+@@ -338,7 +338,6 @@
+ static int iTCO_wdt_start(void)
+ {
+       unsigned int val;
+-      unsigned long val32;
+       spin_lock(&iTCO_wdt_private.io_lock);
+@@ -351,11 +350,6 @@
+               return -EIO;
+       }
+-      /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
+-      val32 = inl(SMI_EN);
+-      val32 &= 0xffffdfff;    /* Turn off SMI clearing watchdog */
+-      outl(val32, SMI_EN);
+-
+       /* Force the timer to its reload value by writing to the TCO_RLD
+          register */
+       if (iTCO_wdt_private.iTCO_version == 2)
+@@ -378,7 +372,6 @@
+ static int iTCO_wdt_stop(void)
+ {
+       unsigned int val;
+-      unsigned long val32;
+       spin_lock(&iTCO_wdt_private.io_lock);
+@@ -390,11 +383,6 @@
+       outw(val, TCO1_CNT);
+       val = inw(TCO1_CNT);
+-      /* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
+-      val32 = inl(SMI_EN);
+-      val32 |= 0x00002000;
+-      outl(val32, SMI_EN);
+-
+       /* Set the NO_REBOOT bit to prevent later reboots, just for sure */
+       iTCO_wdt_set_NO_REBOOT_bit();
+@@ -649,6 +637,7 @@
+       int ret;
+       u32 base_address;
+       unsigned long RCBA;
++      unsigned long val32;
+       /*
+        *      Find the ACPI/PM base I/O address which is the base
+@@ -695,6 +684,10 @@
+               ret = -EIO;
+               goto out;
+       }
++      /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
++      val32 = inl(SMI_EN);
++      val32 &= 0xffffdfff;    /* Turn off SMI clearing watchdog */
++      outl(val32, SMI_EN);
+       /* The TCO I/O registers reside in a 32-byte range pointed to
+          by the TCOBASE value */
+--- kernel-maemo-2.6.28.test.orig/drivers/watchdog/ks8695_wdt.c
++++ kernel-maemo-2.6.28.test/drivers/watchdog/ks8695_wdt.c
+@@ -21,6 +21,7 @@
+ #include <linux/watchdog.h>
+ #include <linux/io.h>
+ #include <linux/uaccess.h>
++#include <mach/timex.h>
+ #include <mach/regs-timer.h>
+ #define WDT_DEFAULT_TIME      5       /* seconds */
+--- kernel-maemo-2.6.28.test.orig/drivers/watchdog/rc32434_wdt.c
++++ kernel-maemo-2.6.28.test/drivers/watchdog/rc32434_wdt.c
+@@ -34,104 +34,89 @@
+ #include <asm/time.h>
+ #include <asm/mach-rc32434/integ.h>
+-#define MAX_TIMEOUT                   20
+-#define RC32434_WDT_INTERVAL          (15 * HZ)
+-
+-#define VERSION "0.2"
++#define VERSION "0.4"
+ static struct {
+-      struct completion stop;
+-      int running;
+-      struct timer_list timer;
+-      int queue;
+-      int default_ticks;
+       unsigned long inuse;
+ } rc32434_wdt_device;
+ static struct integ __iomem *wdt_reg;
+-static int ticks = 100 * HZ;
+ static int expect_close;
+-static int timeout;
++
++/* Board internal clock speed in Hz,
++ * the watchdog timer ticks at. */
++extern unsigned int idt_cpu_freq;
++
++/* translate wtcompare value to seconds and vice versa */
++#define WTCOMP2SEC(x) (x / idt_cpu_freq)
++#define SEC2WTCOMP(x) (x * idt_cpu_freq)
++
++/* Use a default timeout of 20s. This should be
++ * safe for CPU clock speeds up to 400MHz, as
++ * ((2 ^ 32) - 1) / (400MHz / 2) = 21s.  */
++#define WATCHDOG_TIMEOUT 20
++
++static int timeout = WATCHDOG_TIMEOUT;
+ static int nowayout = WATCHDOG_NOWAYOUT;
+ module_param(nowayout, int, 0);
+ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+       __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
++/* apply or and nand masks to data read from addr and write back */
++#define SET_BITS(addr, or, nand) \
++      writel((readl(&addr) | or) & ~nand, &addr)
+ static void rc32434_wdt_start(void)
+ {
+-      u32 val;
++      u32 or, nand;
+-      if (!rc32434_wdt_device.inuse) {
+-              writel(0, &wdt_reg->wtcount);
++      /* zero the counter before enabling */
++      writel(0, &wdt_reg->wtcount);
+-              val = RC32434_ERR_WRE;
+-              writel(readl(&wdt_reg->errcs) | val, &wdt_reg->errcs);
++      /* don't generate a non-maskable interrupt,
++       * do a warm reset instead */
++      nand = 1 << RC32434_ERR_WNE;
++      or = 1 << RC32434_ERR_WRE;
+-              val = RC32434_WTC_EN;
+-              writel(readl(&wdt_reg->wtc) | val, &wdt_reg->wtc);
+-      }
+-      rc32434_wdt_device.running++;
+-}
++      /* reset the ERRCS timeout bit in case it's set */
++      nand |= 1 << RC32434_ERR_WTO;
+-static void rc32434_wdt_stop(void)
+-{
+-      u32 val;
+-
+-      if (rc32434_wdt_device.running) {
++      SET_BITS(wdt_reg->errcs, or, nand);
+-              val = ~RC32434_WTC_EN;
+-              writel(readl(&wdt_reg->wtc) & val, &wdt_reg->wtc);
++      /* reset WTC timeout bit and enable WDT */
++      nand = 1 << RC32434_WTC_TO;
++      or = 1 << RC32434_WTC_EN;
+-              val = ~RC32434_ERR_WRE;
+-              writel(readl(&wdt_reg->errcs) & val, &wdt_reg->errcs);
++      SET_BITS(wdt_reg->wtc, or, nand);
++}
+-              rc32434_wdt_device.running = 0;
+-      }
++static void rc32434_wdt_stop(void)
++{
++      /* Disable WDT */
++      SET_BITS(wdt_reg->wtc, 0, 1 << RC32434_WTC_EN);
+ }
+-static void rc32434_wdt_set(int new_timeout)
++static int rc32434_wdt_set(int new_timeout)
+ {
+-      u32 cmp = new_timeout * HZ;
+-      u32 state, val;
++      int max_to = WTCOMP2SEC((u32)-1);
++      if (new_timeout < 0 || new_timeout > max_to) {
++              printk(KERN_ERR KBUILD_MODNAME
++                      ": timeout value must be between 0 and %d",
++                      max_to);
++              return -EINVAL;
++      }
+       timeout = new_timeout;
+-      /*
+-       * store and disable WTC
+-       */
+-      state = (u32)(readl(&wdt_reg->wtc) & RC32434_WTC_EN);
+-      val = ~RC32434_WTC_EN;
+-      writel(readl(&wdt_reg->wtc) & val, &wdt_reg->wtc);
+-
+-      writel(0, &wdt_reg->wtcount);
+-      writel(cmp, &wdt_reg->wtcompare);
+-
+-      /*
+-       * restore WTC
+-       */
++      writel(SEC2WTCOMP(timeout), &wdt_reg->wtcompare);
+-      writel(readl(&wdt_reg->wtc) | state, &wdt_reg);
+-}
+-
+-static void rc32434_wdt_reset(void)
+-{
+-      ticks = rc32434_wdt_device.default_ticks;
++      return 0;
+ }
+-static void rc32434_wdt_update(unsigned long unused)
++static void rc32434_wdt_ping(void)
+ {
+-      if (rc32434_wdt_device.running)
+-              ticks--;
+-
+       writel(0, &wdt_reg->wtcount);
+-
+-      if (rc32434_wdt_device.queue && ticks)
+-              mod_timer(&rc32434_wdt_device.timer,
+-                      jiffies + RC32434_WDT_INTERVAL);
+-      else
+-              complete(&rc32434_wdt_device.stop);
+ }
+ static int rc32434_wdt_open(struct inode *inode, struct file *file)
+@@ -142,19 +127,23 @@
+       if (nowayout)
+               __module_get(THIS_MODULE);
++      rc32434_wdt_start();
++      rc32434_wdt_ping();
++
+       return nonseekable_open(inode, file);
+ }
+ static int rc32434_wdt_release(struct inode *inode, struct file *file)
+ {
+-      if (expect_close && nowayout == 0) {
++      if (expect_close == 42) {
+               rc32434_wdt_stop();
+               printk(KERN_INFO KBUILD_MODNAME ": disabling watchdog timer\n");
+               module_put(THIS_MODULE);
+-      } else
++      } else {
+               printk(KERN_CRIT KBUILD_MODNAME
+                       ": device closed unexpectedly. WDT will not stop !\n");
+-
++              rc32434_wdt_ping();
++      }
+       clear_bit(0, &rc32434_wdt_device.inuse);
+       return 0;
+ }
+@@ -174,10 +163,10 @@
+                               if (get_user(c, data + i))
+                                       return -EFAULT;
+                               if (c == 'V')
+-                                      expect_close = 1;
++                                      expect_close = 42;
+                       }
+               }
+-              rc32434_wdt_update(0);
++              rc32434_wdt_ping();
+               return len;
+       }
+       return 0;
+@@ -197,11 +186,11 @@
+       };
+       switch (cmd) {
+       case WDIOC_KEEPALIVE:
+-              rc32434_wdt_reset();
++              rc32434_wdt_ping();
+               break;
+       case WDIOC_GETSTATUS:
+       case WDIOC_GETBOOTSTATUS:
+-              value = readl(&wdt_reg->wtcount);
++              value = 0;
+               if (copy_to_user(argp, &value, sizeof(int)))
+                       return -EFAULT;
+               break;
+@@ -218,6 +207,7 @@
+                       break;
+               case WDIOS_DISABLECARD:
+                       rc32434_wdt_stop();
++                      break;
+               default:
+                       return -EINVAL;
+               }
+@@ -225,11 +215,9 @@
+       case WDIOC_SETTIMEOUT:
+               if (copy_from_user(&new_timeout, argp, sizeof(int)))
+                       return -EFAULT;
+-              if (new_timeout < 1)
++              if (rc32434_wdt_set(new_timeout))
+                       return -EINVAL;
+-              if (new_timeout > MAX_TIMEOUT)
+-                      return -EINVAL;
+-              rc32434_wdt_set(new_timeout);
++              /* Fall through */
+       case WDIOC_GETTIMEOUT:
+               return copy_to_user(argp, &timeout, sizeof(int));
+       default:
+@@ -254,15 +242,15 @@
+       .fops   = &rc32434_wdt_fops,
+ };
+-static char banner[] = KERN_INFO KBUILD_MODNAME
++static char banner[] __devinitdata = KERN_INFO KBUILD_MODNAME
+               ": Watchdog Timer version " VERSION ", timer margin: %d sec\n";
+-static int rc32434_wdt_probe(struct platform_device *pdev)
++static int __devinit rc32434_wdt_probe(struct platform_device *pdev)
+ {
+       int ret;
+       struct resource *r;
+-      r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rb500_wdt_res");
++      r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rb532_wdt_res");
+       if (!r) {
+               printk(KERN_ERR KBUILD_MODNAME
+                       "failed to retrieve resources\n");
+@@ -277,24 +265,12 @@
+       }
+       ret = misc_register(&rc32434_wdt_miscdev);
+-
+       if (ret < 0) {
+               printk(KERN_ERR KBUILD_MODNAME
+                       "failed to register watchdog device\n");
+               goto unmap;
+       }
+-      init_completion(&rc32434_wdt_device.stop);
+-      rc32434_wdt_device.queue = 0;
+-
+-      clear_bit(0, &rc32434_wdt_device.inuse);
+-
+-      setup_timer(&rc32434_wdt_device.timer, rc32434_wdt_update, 0L);
+-
+-      rc32434_wdt_device.default_ticks = ticks;
+-
+-      rc32434_wdt_start();
+-
+       printk(banner, timeout);
+       return 0;
+@@ -304,23 +280,17 @@
+       return ret;
+ }
+-static int rc32434_wdt_remove(struct platform_device *pdev)
++static int __devexit rc32434_wdt_remove(struct platform_device *pdev)
+ {
+-      if (rc32434_wdt_device.queue) {
+-              rc32434_wdt_device.queue = 0;
+-              wait_for_completion(&rc32434_wdt_device.stop);
+-      }
+       misc_deregister(&rc32434_wdt_miscdev);
+-
+       iounmap(wdt_reg);
+-
+       return 0;
+ }
+ static struct platform_driver rc32434_wdt = {
+       .probe  = rc32434_wdt_probe,
+-      .remove = rc32434_wdt_remove,
+-      .driver = {
++      .remove = __devexit_p(rc32434_wdt_remove),
++      .driver = {
+               .name = "rc32434_wdt",
+       }
+ };
+--- kernel-maemo-2.6.28.test.orig/drivers/xen/balloon.c
++++ kernel-maemo-2.6.28.test/drivers/xen/balloon.c
+@@ -488,7 +488,7 @@
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+-      target_bytes = memparse(buf, &endchar);
++      target_bytes = simple_strtoull(buf, &endchar, 0) * 1024;
+       balloon_set_new_target(target_bytes >> PAGE_SHIFT);
+@@ -498,8 +498,39 @@
+ static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
+                  show_target_kb, store_target_kb);
++
++static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr,
++                            char *buf)
++{
++      return sprintf(buf, "%llu\n",
++                     (u64)balloon_stats.target_pages << PAGE_SHIFT);
++}
++
++static ssize_t store_target(struct sys_device *dev,
++                          struct sysdev_attribute *attr,
++                          const char *buf,
++                          size_t count)
++{
++      char *endchar;
++      unsigned long long target_bytes;
++
++      if (!capable(CAP_SYS_ADMIN))
++              return -EPERM;
++
++      target_bytes = memparse(buf, &endchar);
++
++      balloon_set_new_target(target_bytes >> PAGE_SHIFT);
++
++      return count;
++}
++
++static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR,
++                 show_target, store_target);
++
++
+ static struct sysdev_attribute *balloon_attrs[] = {
+       &attr_target_kb,
++      &attr_target,
+ };
+ static struct attribute *balloon_info_attrs[] = {
+--- kernel-maemo-2.6.28.test.orig/fs/affs/file.c
++++ kernel-maemo-2.6.28.test/fs/affs/file.c
+@@ -628,7 +628,7 @@
+       }
+       index = pos >> PAGE_CACHE_SHIFT;
+-      page = __grab_cache_page(mapping, index);
++      page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page)
+               return -ENOMEM;
+       *pagep = page;
+--- kernel-maemo-2.6.28.test.orig/fs/afs/write.c
++++ kernel-maemo-2.6.28.test/fs/afs/write.c
+@@ -144,7 +144,7 @@
+       candidate->state = AFS_WBACK_PENDING;
+       init_waitqueue_head(&candidate->waitq);
+-      page = __grab_cache_page(mapping, index);
++      page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page) {
+               kfree(candidate);
+               return -ENOMEM;
+--- kernel-maemo-2.6.28.test.orig/fs/aio.c
++++ kernel-maemo-2.6.28.test/fs/aio.c
+@@ -428,7 +428,7 @@
+       req->private = NULL;
+       req->ki_iovec = NULL;
+       INIT_LIST_HEAD(&req->ki_run_list);
+-      req->ki_eventfd = ERR_PTR(-EINVAL);
++      req->ki_eventfd = NULL;
+       /* Check if the completion queue has enough free space to
+        * accept an event from this io.
+@@ -470,8 +470,6 @@
+ {
+       assert_spin_locked(&ctx->ctx_lock);
+-      if (!IS_ERR(req->ki_eventfd))
+-              fput(req->ki_eventfd);
+       if (req->ki_dtor)
+               req->ki_dtor(req);
+       if (req->ki_iovec != &req->ki_inline_vec)
+@@ -493,8 +491,11 @@
+               list_del(&req->ki_list);
+               spin_unlock_irq(&fput_lock);
+-              /* Complete the fput */
+-              __fput(req->ki_filp);
++              /* Complete the fput(s) */
++              if (req->ki_filp != NULL)
++                      __fput(req->ki_filp);
++              if (req->ki_eventfd != NULL)
++                      __fput(req->ki_eventfd);
+               /* Link the iocb into the context's free list */
+               spin_lock_irq(&ctx->ctx_lock);
+@@ -512,12 +513,14 @@
+  */
+ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
+ {
++      int schedule_putreq = 0;
++
+       dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
+               req, atomic_long_read(&req->ki_filp->f_count));
+       assert_spin_locked(&ctx->ctx_lock);
+-      req->ki_users --;
++      req->ki_users--;
+       BUG_ON(req->ki_users < 0);
+       if (likely(req->ki_users))
+               return 0;
+@@ -525,10 +528,23 @@
+       req->ki_cancel = NULL;
+       req->ki_retry = NULL;
+-      /* Must be done under the lock to serialise against cancellation.
+-       * Call this aio_fput as it duplicates fput via the fput_work.
++      /*
++       * Try to optimize the aio and eventfd file* puts, by avoiding to
++       * schedule work in case it is not __fput() time. In normal cases,
++       * we would not be holding the last reference to the file*, so
++       * this function will be executed w/out any aio kthread wakeup.
+        */
+-      if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
++      if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count)))
++              schedule_putreq++;
++      else
++              req->ki_filp = NULL;
++      if (req->ki_eventfd != NULL) {
++              if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count)))
++                      schedule_putreq++;
++              else
++                      req->ki_eventfd = NULL;
++      }
++      if (unlikely(schedule_putreq)) {
+               get_ioctx(ctx);
+               spin_lock(&fput_lock);
+               list_add(&req->ki_list, &fput_head);
+@@ -992,7 +1008,7 @@
+        * eventfd. The eventfd_signal() function is safe to be called
+        * from IRQ context.
+        */
+-      if (!IS_ERR(iocb->ki_eventfd))
++      if (iocb->ki_eventfd != NULL)
+               eventfd_signal(iocb->ki_eventfd, 1);
+ put_rq:
+@@ -1258,7 +1274,7 @@
+  *    pointer is passed for ctxp.  Will fail with -ENOSYS if not
+  *    implemented.
+  */
+-asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
++SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
+ {
+       struct kioctx *ioctx = NULL;
+       unsigned long ctx;
+@@ -1296,7 +1312,7 @@
+  *    implemented.  May fail with -EFAULT if the context pointed to
+  *    is invalid.
+  */
+-asmlinkage long sys_io_destroy(aio_context_t ctx)
++SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
+ {
+       struct kioctx *ioctx = lookup_ioctx(ctx);
+       if (likely(NULL != ioctx)) {
+@@ -1596,6 +1612,7 @@
+               req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
+               if (IS_ERR(req->ki_eventfd)) {
+                       ret = PTR_ERR(req->ki_eventfd);
++                      req->ki_eventfd = NULL;
+                       goto out_put_req;
+               }
+       }
+@@ -1650,8 +1667,8 @@
+  *    are available to queue any iocbs.  Will return 0 if nr is 0.  Will
+  *    fail with -ENOSYS if not implemented.
+  */
+-asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr,
+-                            struct iocb __user * __user *iocbpp)
++SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
++              struct iocb __user * __user *, iocbpp)
+ {
+       struct kioctx *ctx;
+       long ret = 0;
+@@ -1725,8 +1742,8 @@
+  *    invalid.  May fail with -EAGAIN if the iocb specified was not
+  *    cancelled.  Will fail with -ENOSYS if not implemented.
+  */
+-asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
+-                            struct io_event __user *result)
++SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
++              struct io_event __user *, result)
+ {
+       int (*cancel)(struct kiocb *iocb, struct io_event *res);
+       struct kioctx *ctx;
+@@ -1787,11 +1804,11 @@
+  *    will be updated if not NULL and the operation blocks.  Will fail
+  *    with -ENOSYS if not implemented.
+  */
+-asmlinkage long sys_io_getevents(aio_context_t ctx_id,
+-                               long min_nr,
+-                               long nr,
+-                               struct io_event __user *events,
+-                               struct timespec __user *timeout)
++SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
++              long, min_nr,
++              long, nr,
++              struct io_event __user *, events,
++              struct timespec __user *, timeout)
+ {
+       struct kioctx *ioctx = lookup_ioctx(ctx_id);
+       long ret = -EINVAL;
+--- kernel-maemo-2.6.28.test.orig/fs/anon_inodes.c
++++ kernel-maemo-2.6.28.test/fs/anon_inodes.c
+@@ -79,9 +79,12 @@
+       if (IS_ERR(anon_inode_inode))
+               return -ENODEV;
++      if (fops->owner && !try_module_get(fops->owner))
++              return -ENOENT;
++
+       error = get_unused_fd_flags(flags);
+       if (error < 0)
+-              return error;
++              goto err_module;
+       fd = error;
+       /*
+@@ -128,6 +131,8 @@
+       dput(dentry);
+ err_put_unused_fd:
+       put_unused_fd(fd);
++err_module:
++      module_put(fops->owner);
+       return error;
+ }
+ EXPORT_SYMBOL_GPL(anon_inode_getfd);
+--- kernel-maemo-2.6.28.test.orig/fs/binfmt_elf.c
++++ kernel-maemo-2.6.28.test/fs/binfmt_elf.c
+@@ -1196,9 +1196,11 @@
+        * check for an ELF header.  If we find one, dump the first page to
+        * aid in determining what was mapped here.
+        */
+-      if (FILTER(ELF_HEADERS) && vma->vm_file != NULL && vma->vm_pgoff == 0) {
++      if (FILTER(ELF_HEADERS) &&
++          vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
+               u32 __user *header = (u32 __user *) vma->vm_start;
+               u32 word;
++              mm_segment_t fs = get_fs();
+               /*
+                * Doing it this way gets the constant folded by GCC.
+                */
+@@ -1211,7 +1213,15 @@
+               magic.elfmag[EI_MAG1] = ELFMAG1;
+               magic.elfmag[EI_MAG2] = ELFMAG2;
+               magic.elfmag[EI_MAG3] = ELFMAG3;
+-              if (get_user(word, header) == 0 && word == magic.cmp)
++              /*
++               * Switch to the user "segment" for get_user(),
++               * then put back what elf_core_dump() had in place.
++               */
++              set_fs(USER_DS);
++              if (unlikely(get_user(word, header)))
++                      word = 0;
++              set_fs(fs);
++              if (word == magic.cmp)
+                       return PAGE_SIZE;
+       }
+--- kernel-maemo-2.6.28.test.orig/fs/buffer.c
++++ kernel-maemo-2.6.28.test/fs/buffer.c
+@@ -1988,7 +1988,7 @@
+       page = *pagep;
+       if (page == NULL) {
+               ownpage = 1;
+-              page = __grab_cache_page(mapping, index);
++              page = grab_cache_page_write_begin(mapping, index, flags);
+               if (!page) {
+                       status = -ENOMEM;
+                       goto out;
+@@ -2494,7 +2494,7 @@
+       from = pos & (PAGE_CACHE_SIZE - 1);
+       to = from + len;
+-      page = __grab_cache_page(mapping, index);
++      page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page)
+               return -ENOMEM;
+       *pagep = page;
+@@ -3042,7 +3042,7 @@
+       if (test_clear_buffer_dirty(bh)) {
+               get_bh(bh);
+               bh->b_end_io = end_buffer_write_sync;
+-              ret = submit_bh(WRITE_SYNC, bh);
++              ret = submit_bh(WRITE, bh);
+               wait_on_buffer(bh);
+               if (buffer_eopnotsupp(bh)) {
+                       clear_buffer_eopnotsupp(bh);
+@@ -3177,7 +3177,7 @@
+  * Use of bdflush() is deprecated and will be removed in a future kernel.
+  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
+  */
+-asmlinkage long sys_bdflush(int func, long data)
++SYSCALL_DEFINE2(bdflush, int, func, long, data)
+ {
+       static int msg_count;
+--- kernel-maemo-2.6.28.test.orig/fs/cifs/CHANGES
++++ kernel-maemo-2.6.28.test/fs/cifs/CHANGES
+@@ -1,3 +1,8 @@
++Fix oops in cifs_dfs_ref.c when prefixpath is not reachable when using DFS.
++Fix "redzone overwritten" bug in cifs_put_tcon (CIFSTcon may allocate too
++little memory for the "nativeFileSystem" field returned by the server
++during mount).
++
+ Version 1.55
+ ------------
+ Various fixes to make delete of open files behavior more predictable
+--- kernel-maemo-2.6.28.test.orig/fs/cifs/cifs_dfs_ref.c
++++ kernel-maemo-2.6.28.test/fs/cifs/cifs_dfs_ref.c
+@@ -122,7 +122,7 @@
+                                  char **devname)
+ {
+       int rc;
+-      char *mountdata;
++      char *mountdata = NULL;
+       int md_len;
+       char *tkn_e;
+       char *srvIP = NULL;
+@@ -136,10 +136,9 @@
+       *devname = cifs_get_share_name(ref->node_name);
+       rc = dns_resolve_server_name_to_ip(*devname, &srvIP);
+       if (rc != 0) {
+-              cERROR(1, ("%s: Failed to resolve server part of %s to IP",
+-                        __func__, *devname));
+-              mountdata = ERR_PTR(rc);
+-              goto compose_mount_options_out;
++              cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d",
++                        __func__, *devname, rc));;
++              goto compose_mount_options_err;
+       }
+       /* md_len = strlen(...) + 12 for 'sep+prefixpath='
+        * assuming that we have 'unc=' and 'ip=' in
+@@ -149,8 +148,8 @@
+               strlen(ref->node_name) + 12;
+       mountdata = kzalloc(md_len+1, GFP_KERNEL);
+       if (mountdata == NULL) {
+-              mountdata = ERR_PTR(-ENOMEM);
+-              goto compose_mount_options_out;
++              rc = -ENOMEM;
++              goto compose_mount_options_err;
+       }
+       /* copy all options except of unc,ip,prefixpath */
+@@ -197,18 +196,32 @@
+       /* find & copy prefixpath */
+       tkn_e = strchr(ref->node_name + 2, '\\');
+-      if (tkn_e == NULL) /* invalid unc, missing share name*/
+-              goto compose_mount_options_out;
++      if (tkn_e == NULL) {
++              /* invalid unc, missing share name*/
++              rc = -EINVAL;
++              goto compose_mount_options_err;
++      }
++      /*
++       * this function gives us a path with a double backslash prefix. We
++       * require a single backslash for DFS. Temporarily increment fullpath
++       * to put it in the proper form and decrement before freeing it.
++       */
+       fullpath = build_path_from_dentry(dentry);
++      if (!fullpath) {
++              rc = -ENOMEM;
++              goto compose_mount_options_err;
++      }
++      ++fullpath;
+       tkn_e = strchr(tkn_e + 1, '\\');
+-      if (tkn_e || strlen(fullpath) - (ref->path_consumed)) {
++      if (tkn_e || (strlen(fullpath) - ref->path_consumed)) {
+               strncat(mountdata, &sep, 1);
+               strcat(mountdata, "prefixpath=");
+               if (tkn_e)
+                       strcat(mountdata, tkn_e + 1);
+-              strcat(mountdata, fullpath + (ref->path_consumed));
++              strcat(mountdata, fullpath + ref->path_consumed);
+       }
++      --fullpath;
+       kfree(fullpath);
+       /*cFYI(1,("%s: parent mountdata: %s", __func__,sb_mountdata));*/
+@@ -217,6 +230,11 @@
+ compose_mount_options_out:
+       kfree(srvIP);
+       return mountdata;
++
++compose_mount_options_err:
++      kfree(mountdata);
++      mountdata = ERR_PTR(rc);
++      goto compose_mount_options_out;
+ }
+@@ -309,13 +327,19 @@
+               goto out_err;
+       }
++      /*
++       * The MSDFS spec states that paths in DFS referral requests and
++       * responses must be prefixed by a single '\' character instead of
++       * the double backslashes usually used in the UNC. This function
++       * gives us the latter, so we must adjust the result.
++       */
+       full_path = build_path_from_dentry(dentry);
+       if (full_path == NULL) {
+               rc = -ENOMEM;
+               goto out_err;
+       }
+-      rc = get_dfs_path(xid, ses , full_path, cifs_sb->local_nls,
++      rc = get_dfs_path(xid, ses , full_path + 1, cifs_sb->local_nls,
+               &num_referrals, &referrals,
+               cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+--- kernel-maemo-2.6.28.test.orig/fs/cifs/cifssmb.c
++++ kernel-maemo-2.6.28.test/fs/cifs/cifssmb.c
+@@ -2350,8 +2350,10 @@
+                                    PATH_MAX, nls_codepage, remap);
+               name_len++;     /* trailing null */
+               name_len *= 2;
+-              pSMB->OldFileName[name_len] = 0;        /* pad */
+-              pSMB->OldFileName[name_len + 1] = 0x04;
++
++              /* protocol specifies ASCII buffer format (0x04) for unicode */
++              pSMB->OldFileName[name_len] = 0x04;
++              pSMB->OldFileName[name_len + 1] = 0x00; /* pad */
+               name_len2 =
+                   cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
+                                    toName, PATH_MAX, nls_codepage, remap);
+--- kernel-maemo-2.6.28.test.orig/fs/cifs/connect.c
++++ kernel-maemo-2.6.28.test/fs/cifs/connect.c
+@@ -1356,7 +1356,7 @@
+ }
+ static struct TCP_Server_Info *
+-cifs_find_tcp_session(struct sockaddr *addr)
++cifs_find_tcp_session(struct sockaddr_storage *addr)
+ {
+       struct list_head *tmp;
+       struct TCP_Server_Info *server;
+@@ -1376,11 +1376,11 @@
+               if (server->tcpStatus == CifsNew)
+                       continue;
+-              if (addr->sa_family == AF_INET &&
++              if (addr->ss_family == AF_INET &&
+                   (addr4->sin_addr.s_addr !=
+                    server->addr.sockAddr.sin_addr.s_addr))
+                       continue;
+-              else if (addr->sa_family == AF_INET6 &&
++              else if (addr->ss_family == AF_INET6 &&
+                        memcmp(&server->addr.sockAddr6.sin6_addr,
+                               &addr6->sin6_addr, sizeof(addr6->sin6_addr)))
+                       continue;
+@@ -2036,7 +2036,7 @@
+       int rc = 0;
+       int xid;
+       struct socket *csocket = NULL;
+-      struct sockaddr addr;
++      struct sockaddr_storage addr;
+       struct sockaddr_in *sin_server = (struct sockaddr_in *) &addr;
+       struct sockaddr_in6 *sin_server6 = (struct sockaddr_in6 *) &addr;
+       struct smb_vol volume_info;
+@@ -2048,7 +2048,7 @@
+ /* cFYI(1, ("Entering cifs_mount. Xid: %d with: %s", xid, mount_data)); */
+-      memset(&addr, 0, sizeof(struct sockaddr));
++      memset(&addr, 0, sizeof(struct sockaddr_storage));
+       memset(&volume_info, 0, sizeof(struct smb_vol));
+       if (cifs_parse_mount_options(mount_data, devname, &volume_info)) {
+               rc = -EINVAL;
+@@ -2078,9 +2078,9 @@
+                       rc = cifs_inet_pton(AF_INET6, volume_info.UNCip,
+                                           &sin_server6->sin6_addr.in6_u);
+                       if (rc > 0)
+-                              addr.sa_family = AF_INET6;
++                              addr.ss_family = AF_INET6;
+               } else {
+-                      addr.sa_family = AF_INET;
++                      addr.ss_family = AF_INET;
+               }
+               if (rc <= 0) {
+@@ -2122,7 +2122,7 @@
+       srvTcp = cifs_find_tcp_session(&addr);
+       if (!srvTcp) { /* create socket */
+-              if (addr.sa_family == AF_INET6) {
++              if (addr.ss_family == AF_INET6) {
+                       cFYI(1, ("attempting ipv6 connect"));
+                       /* BB should we allow ipv6 on port 139? */
+                       /* other OS never observed in Wild doing 139 with v6 */
+@@ -2153,7 +2153,7 @@
+               } else {
+                       srvTcp->noblocksnd = volume_info.noblocksnd;
+                       srvTcp->noautotune = volume_info.noautotune;
+-                      if (addr.sa_family == AF_INET6)
++                      if (addr.ss_family == AF_INET6)
+                               memcpy(&srvTcp->addr.sockAddr6, sin_server6,
+                                       sizeof(struct sockaddr_in6));
+                       else
+@@ -3565,7 +3565,7 @@
+                           BCC(smb_buffer_response)) {
+                               kfree(tcon->nativeFileSystem);
+                               tcon->nativeFileSystem =
+-                                  kzalloc(length + 2, GFP_KERNEL);
++                                  kzalloc(2*(length + 1), GFP_KERNEL);
+                               if (tcon->nativeFileSystem)
+                                       cifs_strfromUCS_le(
+                                               tcon->nativeFileSystem,
+--- kernel-maemo-2.6.28.test.orig/fs/cifs/file.c
++++ kernel-maemo-2.6.28.test/fs/cifs/file.c
+@@ -2073,7 +2073,7 @@
+       cFYI(1, ("write_begin from %lld len %d", (long long)pos, len));
+-      page = __grab_cache_page(mapping, index);
++      page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page) {
+               rc = -ENOMEM;
+               goto out;
+--- kernel-maemo-2.6.28.test.orig/fs/cifs/sess.c
++++ kernel-maemo-2.6.28.test/fs/cifs/sess.c
+@@ -228,7 +228,7 @@
+       kfree(ses->serverOS);
+       /* UTF-8 string will not grow more than four times as big as UCS-16 */
+-      ses->serverOS = kzalloc(4 * len, GFP_KERNEL);
++      ses->serverOS = kzalloc((4 * len) + 2 /* trailing null */, GFP_KERNEL);
+       if (ses->serverOS != NULL)
+               cifs_strfromUCS_le(ses->serverOS, (__le16 *)data, len, nls_cp);
+       data += 2 * (len + 1);
+@@ -241,7 +241,7 @@
+               return rc;
+       kfree(ses->serverNOS);
+-      ses->serverNOS = kzalloc(4 * len, GFP_KERNEL); /* BB this is wrong length FIXME BB */
++      ses->serverNOS = kzalloc((4 * len) + 2 /* trailing null */, GFP_KERNEL);
+       if (ses->serverNOS != NULL) {
+               cifs_strfromUCS_le(ses->serverNOS, (__le16 *)data, len,
+                                  nls_cp);
+--- kernel-maemo-2.6.28.test.orig/fs/compat.c
++++ kernel-maemo-2.6.28.test/fs/compat.c
+@@ -1386,12 +1386,17 @@
+ {
+       struct linux_binprm *bprm;
+       struct file *file;
++      struct files_struct *displaced;
+       int retval;
++      retval = unshare_files(&displaced);
++      if (retval)
++              goto out_ret;
++
+       retval = -ENOMEM;
+       bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
+       if (!bprm)
+-              goto out_ret;
++              goto out_files;
+       file = open_exec(filename);
+       retval = PTR_ERR(file);
+@@ -1443,6 +1448,8 @@
+               security_bprm_free(bprm);
+               acct_update_integrals(current);
+               free_bprm(bprm);
++              if (displaced)
++                      put_files_struct(displaced);
+               return retval;
+       }
+@@ -1463,6 +1470,9 @@
+ out_kfree:
+       free_bprm(bprm);
++out_files:
++      if (displaced)
++              reset_files_struct(displaced);
+ out_ret:
+       return retval;
+ }
+@@ -1697,7 +1707,7 @@
+ }
+ #ifdef HAVE_SET_RESTORE_SIGMASK
+-asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
++static long do_compat_pselect(int n, compat_ulong_t __user *inp,
+       compat_ulong_t __user *outp, compat_ulong_t __user *exp,
+       struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
+       compat_size_t sigsetsize)
+@@ -1763,8 +1773,8 @@
+                               (compat_size_t __user *)(sig+sizeof(up))))
+                       return -EFAULT;
+       }
+-      return compat_sys_pselect7(n, inp, outp, exp, tsp, compat_ptr(up),
+-                                      sigsetsize);
++      return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
++                               sigsetsize);
+ }
+ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
+--- kernel-maemo-2.6.28.test.orig/fs/compat_ioctl.c
++++ kernel-maemo-2.6.28.test/fs/compat_ioctl.c
+@@ -538,6 +538,7 @@
+                * cannot be fixed without breaking all existing apps.
+                */
+               case TUNSETIFF:
++              case TUNGETIFF:
+               case SIOCGIFFLAGS:
+               case SIOCGIFMETRIC:
+               case SIOCGIFMTU:
+@@ -1937,6 +1938,8 @@
+ /* Big K */
+ COMPATIBLE_IOCTL(PIO_FONT)
+ COMPATIBLE_IOCTL(GIO_FONT)
++COMPATIBLE_IOCTL(PIO_CMAP)
++COMPATIBLE_IOCTL(GIO_CMAP)
+ ULONG_IOCTL(KDSIGACCEPT)
+ COMPATIBLE_IOCTL(KDGETKEYCODE)
+ COMPATIBLE_IOCTL(KDSETKEYCODE)
+@@ -1982,6 +1985,11 @@
+ COMPATIBLE_IOCTL(TUNSETDEBUG)
+ COMPATIBLE_IOCTL(TUNSETPERSIST)
+ COMPATIBLE_IOCTL(TUNSETOWNER)
++COMPATIBLE_IOCTL(TUNSETLINK)
++COMPATIBLE_IOCTL(TUNSETGROUP)
++COMPATIBLE_IOCTL(TUNGETFEATURES)
++COMPATIBLE_IOCTL(TUNSETOFFLOAD)
++COMPATIBLE_IOCTL(TUNSETTXFILTER)
+ /* Big V */
+ COMPATIBLE_IOCTL(VT_SETMODE)
+ COMPATIBLE_IOCTL(VT_GETMODE)
+@@ -2573,6 +2581,7 @@
+ HANDLE_IOCTL(SIOCGIFTXQLEN, dev_ifsioc)
+ HANDLE_IOCTL(SIOCSIFTXQLEN, dev_ifsioc)
+ HANDLE_IOCTL(TUNSETIFF, dev_ifsioc)
++HANDLE_IOCTL(TUNGETIFF, dev_ifsioc)
+ HANDLE_IOCTL(SIOCETHTOOL, ethtool_ioctl)
+ HANDLE_IOCTL(SIOCBONDENSLAVE, bond_ioctl)
+ HANDLE_IOCTL(SIOCBONDRELEASE, bond_ioctl)
+--- kernel-maemo-2.6.28.test.orig/fs/dcache.c
++++ kernel-maemo-2.6.28.test/fs/dcache.c
+@@ -1620,8 +1620,11 @@
+                        */
+                       memcpy(dentry->d_iname, target->d_name.name,
+                                       target->d_name.len + 1);
++                      dentry->d_name.len = target->d_name.len;
++                      return;
+               }
+       }
++      do_switch(dentry->d_name.len, target->d_name.len);
+ }
+ /*
+@@ -1681,7 +1684,6 @@
+       /* Switch the names.. */
+       switch_names(dentry, target);
+-      do_switch(dentry->d_name.len, target->d_name.len);
+       do_switch(dentry->d_name.hash, target->d_name.hash);
+       /* ... and switch the parents */
+@@ -1791,7 +1793,6 @@
+       struct dentry *dparent, *aparent;
+       switch_names(dentry, anon);
+-      do_switch(dentry->d_name.len, anon->d_name.len);
+       do_switch(dentry->d_name.hash, anon->d_name.hash);
+       dparent = dentry->d_parent;
+@@ -2095,7 +2096,7 @@
+  *            return NULL;
+  *    }
+  */
+-asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
++SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
+ {
+       int error;
+       struct path pwd, root;
+--- kernel-maemo-2.6.28.test.orig/fs/dcookies.c
++++ kernel-maemo-2.6.28.test/fs/dcookies.c
+@@ -140,7 +140,7 @@
+ /* And here is where the userspace process can look up the cookie value
+  * to retrieve the path.
+  */
+-asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len)
++SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len)
+ {
+       unsigned long cookie = (unsigned long)cookie64;
+       int err = -EINVAL;
+@@ -193,7 +193,13 @@
+       mutex_unlock(&dcookie_mutex);
+       return err;
+ }
+-
++#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
++asmlinkage long SyS_lookup_dcookie(u64 cookie64, long buf, long len)
++{
++      return SYSC_lookup_dcookie(cookie64, (char __user *) buf, (size_t) len);
++}
++SYSCALL_ALIAS(sys_lookup_dcookie, SyS_lookup_dcookie);
++#endif
+ static int dcookie_init(void)
+ {
+--- kernel-maemo-2.6.28.test.orig/fs/dlm/plock.c
++++ kernel-maemo-2.6.28.test/fs/dlm/plock.c
+@@ -304,7 +304,9 @@
+       if (rv == -ENOENT)
+               rv = 0;
+       else if (rv > 0) {
++              locks_init_lock(fl);
+               fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
++              fl->fl_flags = FL_POSIX;
+               fl->fl_pid = op->info.pid;
+               fl->fl_start = op->info.start;
+               fl->fl_end = op->info.end;
+--- kernel-maemo-2.6.28.test.orig/fs/dquot.c
++++ kernel-maemo-2.6.28.test/fs/dquot.c
+@@ -724,7 +724,7 @@
+                       continue;
+               if (!dqinit_needed(inode, type))
+                       continue;
+-              if (inode->i_state & (I_FREEING|I_WILL_FREE))
++              if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
+                       continue;
+               __iget(inode);
+--- kernel-maemo-2.6.28.test.orig/fs/drop_caches.c
++++ kernel-maemo-2.6.28.test/fs/drop_caches.c
+@@ -18,7 +18,7 @@
+       spin_lock(&inode_lock);
+       list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+-              if (inode->i_state & (I_FREEING|I_WILL_FREE))
++              if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
+                       continue;
+               if (inode->i_mapping->nrpages == 0)
+                       continue;
+--- kernel-maemo-2.6.28.test.orig/fs/ecryptfs/crypto.c
++++ kernel-maemo-2.6.28.test/fs/ecryptfs/crypto.c
+@@ -1310,14 +1310,13 @@
+ }
+ static int
+-ecryptfs_write_metadata_to_contents(struct ecryptfs_crypt_stat *crypt_stat,
+-                                  struct dentry *ecryptfs_dentry,
+-                                  char *virt)
++ecryptfs_write_metadata_to_contents(struct dentry *ecryptfs_dentry,
++                                  char *virt, size_t virt_len)
+ {
+       int rc;
+       rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt,
+-                                0, crypt_stat->num_header_bytes_at_front);
++                                0, virt_len);
+       if (rc)
+               printk(KERN_ERR "%s: Error attempting to write header "
+                      "information to lower file; rc = [%d]\n", __func__,
+@@ -1327,7 +1326,6 @@
+ static int
+ ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry,
+-                               struct ecryptfs_crypt_stat *crypt_stat,
+                                char *page_virt, size_t size)
+ {
+       int rc;
+@@ -1337,6 +1335,17 @@
+       return rc;
+ }
++static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
++                                             unsigned int order)
++{
++      struct page *page;
++
++      page = alloc_pages(gfp_mask | __GFP_ZERO, order);
++      if (page)
++              return (unsigned long) page_address(page);
++      return 0;
++}
++
+ /**
+  * ecryptfs_write_metadata
+  * @ecryptfs_dentry: The eCryptfs dentry
+@@ -1353,7 +1362,9 @@
+ {
+       struct ecryptfs_crypt_stat *crypt_stat =
+               &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
++      unsigned int order;
+       char *virt;
++      size_t virt_len;
+       size_t size = 0;
+       int rc = 0;
+@@ -1369,33 +1380,35 @@
+               rc = -EINVAL;
+               goto out;
+       }
++      virt_len = crypt_stat->num_header_bytes_at_front;
++      order = get_order(virt_len);
+       /* Released in this function */
+-      virt = (char *)get_zeroed_page(GFP_KERNEL);
++      virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order);
+       if (!virt) {
+               printk(KERN_ERR "%s: Out of memory\n", __func__);
+               rc = -ENOMEM;
+               goto out;
+       }
+-      rc = ecryptfs_write_headers_virt(virt, PAGE_CACHE_SIZE, &size,
+-                                       crypt_stat, ecryptfs_dentry);
++      rc = ecryptfs_write_headers_virt(virt, virt_len, &size, crypt_stat,
++                                       ecryptfs_dentry);
+       if (unlikely(rc)) {
+               printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n",
+                      __func__, rc);
+               goto out_free;
+       }
+       if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
+-              rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry,
+-                                                    crypt_stat, virt, size);
++              rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt,
++                                                    size);
+       else
+-              rc = ecryptfs_write_metadata_to_contents(crypt_stat,
+-                                                       ecryptfs_dentry, virt);
++              rc = ecryptfs_write_metadata_to_contents(ecryptfs_dentry, virt,
++                                                       virt_len);
+       if (rc) {
+               printk(KERN_ERR "%s: Error writing metadata out to lower file; "
+                      "rc = [%d]\n", __func__, rc);
+               goto out_free;
+       }
+ out_free:
+-      free_page((unsigned long)virt);
++      free_pages((unsigned long)virt, order);
+ out:
+       return rc;
+ }
+--- kernel-maemo-2.6.28.test.orig/fs/ecryptfs/inode.c
++++ kernel-maemo-2.6.28.test/fs/ecryptfs/inode.c
+@@ -673,10 +673,11 @@
+       ecryptfs_printk(KERN_DEBUG, "Calling readlink w/ "
+                       "dentry->d_name.name = [%s]\n", dentry->d_name.name);
+       rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
+-      buf[rc] = '\0';
+       set_fs(old_fs);
+       if (rc < 0)
+               goto out_free;
++      else
++              buf[rc] = '\0';
+       rc = 0;
+       nd_set_link(nd, buf);
+       goto out;
+--- kernel-maemo-2.6.28.test.orig/fs/ecryptfs/mmap.c
++++ kernel-maemo-2.6.28.test/fs/ecryptfs/mmap.c
+@@ -288,7 +288,7 @@
+       loff_t prev_page_end_size;
+       int rc = 0;
+-      page = __grab_cache_page(mapping, index);
++      page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page)
+               return -ENOMEM;
+       *pagep = page;
+--- kernel-maemo-2.6.28.test.orig/fs/eventfd.c
++++ kernel-maemo-2.6.28.test/fs/eventfd.c
+@@ -198,7 +198,7 @@
+       return file;
+ }
+-asmlinkage long sys_eventfd2(unsigned int count, int flags)
++SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
+ {
+       int fd;
+       struct eventfd_ctx *ctx;
+@@ -228,8 +228,7 @@
+       return fd;
+ }
+-asmlinkage long sys_eventfd(unsigned int count)
++SYSCALL_DEFINE1(eventfd, unsigned int, count)
+ {
+       return sys_eventfd2(count, 0);
+ }
+-
+--- kernel-maemo-2.6.28.test.orig/fs/eventpoll.c
++++ kernel-maemo-2.6.28.test/fs/eventpoll.c
+@@ -1095,7 +1095,7 @@
+ /*
+  * Open an eventpoll file descriptor.
+  */
+-asmlinkage long sys_epoll_create1(int flags)
++SYSCALL_DEFINE1(epoll_create1, int, flags)
+ {
+       int error, fd = -1;
+       struct eventpoll *ep;
+@@ -1134,7 +1134,7 @@
+       return fd;
+ }
+-asmlinkage long sys_epoll_create(int size)
++SYSCALL_DEFINE1(epoll_create, int, size)
+ {
+       if (size < 0)
+               return -EINVAL;
+@@ -1147,8 +1147,8 @@
+  * the eventpoll file that enables the insertion/removal/change of
+  * file descriptors inside the interest set.
+  */
+-asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
+-                            struct epoll_event __user *event)
++SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
++              struct epoll_event __user *, event)
+ {
+       int error;
+       struct file *file, *tfile;
+@@ -1245,8 +1245,8 @@
+  * Implement the event wait interface for the eventpoll file. It is the kernel
+  * part of the user space epoll_wait(2).
+  */
+-asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
+-                             int maxevents, int timeout)
++SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
++              int, maxevents, int, timeout)
+ {
+       int error;
+       struct file *file;
+@@ -1303,9 +1303,9 @@
+  * Implement the event wait interface for the eventpoll file. It is the kernel
+  * part of the user space epoll_pwait(2).
+  */
+-asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
+-              int maxevents, int timeout, const sigset_t __user *sigmask,
+-              size_t sigsetsize)
++SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
++              int, maxevents, int, timeout, const sigset_t __user *, sigmask,
++              size_t, sigsetsize)
+ {
+       int error;
+       sigset_t ksigmask, sigsaved;
+--- kernel-maemo-2.6.28.test.orig/fs/exec.c
++++ kernel-maemo-2.6.28.test/fs/exec.c
+@@ -102,7 +102,7 @@
+  *
+  * Also note that we take the address to load from from the file itself.
+  */
+-asmlinkage long sys_uselib(const char __user * library)
++SYSCALL_DEFINE1(uselib, const char __user *, library)
+ {
+       struct file *file;
+       struct nameidata nd;
+@@ -1084,9 +1084,7 @@
+ {
+       int unsafe = tracehook_unsafe_exec(p);
+-      if (atomic_read(&p->fs->count) > 1 ||
+-          atomic_read(&p->files->count) > 1 ||
+-          atomic_read(&p->sighand->count) > 1)
++      if (atomic_read(&p->fs->count) > 1)
+               unsafe |= LSM_UNSAFE_SHARE;
+       return unsafe;
+--- kernel-maemo-2.6.28.test.orig/fs/ext2/super.c
++++ kernel-maemo-2.6.28.test/fs/ext2/super.c
+@@ -1177,9 +1177,12 @@
+       es = sbi->s_es;
+       if (((sbi->s_mount_opt & EXT2_MOUNT_XIP) !=
+           (old_mount_opt & EXT2_MOUNT_XIP)) &&
+-          invalidate_inodes(sb))
+-              ext2_warning(sb, __func__, "busy inodes while remounting "\
+-                           "xip remain in cache (no functional problem)");
++          invalidate_inodes(sb)) {
++              ext2_warning(sb, __func__, "refusing change of xip flag "
++                           "with busy inodes while remounting");
++              sbi->s_mount_opt &= ~EXT2_MOUNT_XIP;
++              sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
++      }
+       if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
+               return 0;
+       if (*flags & MS_RDONLY) {
+--- kernel-maemo-2.6.28.test.orig/fs/ext3/inode.c
++++ kernel-maemo-2.6.28.test/fs/ext3/inode.c
+@@ -1160,7 +1160,7 @@
+       to = from + len;
+ retry:
+-      page = __grab_cache_page(mapping, index);
++      page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page)
+               return -ENOMEM;
+       *pagep = page;
+--- kernel-maemo-2.6.28.test.orig/fs/ext3/namei.c
++++ kernel-maemo-2.6.28.test/fs/ext3/namei.c
+@@ -1357,7 +1357,7 @@
+       struct fake_dirent *fde;
+       blocksize =  dir->i_sb->s_blocksize;
+-      dxtrace(printk("Creating index\n"));
++      dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
+       retval = ext3_journal_get_write_access(handle, bh);
+       if (retval) {
+               ext3_std_error(dir->i_sb, retval);
+@@ -1366,6 +1366,19 @@
+       }
+       root = (struct dx_root *) bh->b_data;
++      /* The 0th block becomes the root, move the dirents out */
++      fde = &root->dotdot;
++      de = (struct ext3_dir_entry_2 *)((char *)fde +
++                      ext3_rec_len_from_disk(fde->rec_len));
++      if ((char *) de >= (((char *) root) + blocksize)) {
++              ext3_error(dir->i_sb, __func__,
++                         "invalid rec_len for '..' in inode %lu",
++                         dir->i_ino);
++              brelse(bh);
++              return -EIO;
++      }
++      len = ((char *) root) + blocksize - (char *) de;
++
+       bh2 = ext3_append (handle, dir, &block, &retval);
+       if (!(bh2)) {
+               brelse(bh);
+@@ -1374,11 +1387,6 @@
+       EXT3_I(dir)->i_flags |= EXT3_INDEX_FL;
+       data1 = bh2->b_data;
+-      /* The 0th block becomes the root, move the dirents out */
+-      fde = &root->dotdot;
+-      de = (struct ext3_dir_entry_2 *)((char *)fde +
+-                      ext3_rec_len_from_disk(fde->rec_len));
+-      len = ((char *) root) + blocksize - (char *) de;
+       memcpy (data1, de, len);
+       de = (struct ext3_dir_entry_2 *) data1;
+       top = data1 + len;
+@@ -2170,8 +2178,7 @@
+                * We have a transaction open.  All is sweetness.  It also sets
+                * i_size in generic_commit_write().
+                */
+-              err = __page_symlink(inode, symname, l,
+-                              mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
++              err = __page_symlink(inode, symname, l, 1);
+               if (err) {
+                       drop_nlink(inode);
+                       ext3_mark_inode_dirty(handle, inode);
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/balloc.c
++++ kernel-maemo-2.6.28.test/fs/ext4/balloc.c
+@@ -20,6 +20,7 @@
+ #include "ext4.h"
+ #include "ext4_jbd2.h"
+ #include "group.h"
++#include "mballoc.h"
+ /*
+  * balloc.c contains the blocks allocation and deallocation routines
+@@ -319,20 +320,41 @@
+                           block_group, bitmap_blk);
+               return NULL;
+       }
+-      if (buffer_uptodate(bh) &&
+-          !(desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))
++
++      if (bitmap_uptodate(bh))
+               return bh;
+       lock_buffer(bh);
++      if (bitmap_uptodate(bh)) {
++              unlock_buffer(bh);
++              return bh;
++      }
+       spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+       if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+               ext4_init_block_bitmap(sb, bh, block_group, desc);
++              set_bitmap_uptodate(bh);
+               set_buffer_uptodate(bh);
+               unlock_buffer(bh);
+               spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+               return bh;
+       }
+       spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
++      if (buffer_uptodate(bh)) {
++              /*
++               * if the group is not uninit and bh is uptodate,
++               * the bitmap is also uptodate
++               */
++              set_bitmap_uptodate(bh);
++              unlock_buffer(bh);
++              return bh;
++      }
++      /*
++       * submit the buffer_head for read. We can
++       * safely mark the bitmap as uptodate now.
++       * We do it here so the bitmap uptodate bit
++       * gets set with buffer lock held.
++       */
++      set_bitmap_uptodate(bh);
+       if (bh_submit_read(bh) < 0) {
+               put_bh(bh);
+               ext4_error(sb, __func__,
+@@ -350,62 +372,44 @@
+ }
+ /**
+- * ext4_free_blocks_sb() -- Free given blocks and update quota
++ * ext4_add_groupblocks() -- Add given blocks to an existing group
+  * @handle:                   handle to this transaction
+  * @sb:                               super block
+- * @block:                    start physcial block to free
++ * @block:                    start physcial block to add to the block group
+  * @count:                    number of blocks to free
+- * @pdquot_freed_blocks:      pointer to quota
+  *
+- * XXX This function is only used by the on-line resizing code, which
+- * should probably be fixed up to call the mballoc variant.  There
+- * this needs to be cleaned up later; in fact, I'm not convinced this
+- * is 100% correct in the face of the mballoc code.  The online resizing
+- * code needs to be fixed up to more tightly (and correctly) interlock
+- * with the mballoc code.
+- */
+-void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
+-                       ext4_fsblk_t block, unsigned long count,
+-                       unsigned long *pdquot_freed_blocks)
++ * This marks the blocks as free in the bitmap. We ask the
++ * mballoc to reload the buddy after this by setting the group's
++ * EXT4_GROUP_INFO_NEED_INIT_BIT flag
++ */
++void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
++                       ext4_fsblk_t block, unsigned long count)
+ {
+       struct buffer_head *bitmap_bh = NULL;
+       struct buffer_head *gd_bh;
+       ext4_group_t block_group;
+       ext4_grpblk_t bit;
+       unsigned long i;
+-      unsigned long overflow;
+       struct ext4_group_desc *desc;
+       struct ext4_super_block *es;
+       struct ext4_sb_info *sbi;
+       int err = 0, ret;
+-      ext4_grpblk_t group_freed;
++      ext4_grpblk_t blocks_freed;
++      struct ext4_group_info *grp;
+-      *pdquot_freed_blocks = 0;
+       sbi = EXT4_SB(sb);
+       es = sbi->s_es;
+-      if (block < le32_to_cpu(es->s_first_data_block) ||
+-          block + count < block ||
+-          block + count > ext4_blocks_count(es)) {
+-              ext4_error(sb, "ext4_free_blocks",
+-                         "Freeing blocks not in datazone - "
+-                         "block = %llu, count = %lu", block, count);
+-              goto error_return;
+-      }
+-
+-      ext4_debug("freeing block(s) %llu-%llu\n", block, block + count - 1);
++      ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
+-do_more:
+-      overflow = 0;
+       ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
++      grp = ext4_get_group_info(sb, block_group);
+       /*
+        * Check to see if we are freeing blocks across a group
+        * boundary.
+        */
+       if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
+-              overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
+-              count -= overflow;
++              goto error_return;
+       }
+-      brelse(bitmap_bh);
+       bitmap_bh = ext4_read_block_bitmap(sb, block_group);
+       if (!bitmap_bh)
+               goto error_return;
+@@ -418,18 +422,17 @@
+           in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
+           in_range(block + count - 1, ext4_inode_table(sb, desc),
+                    sbi->s_itb_per_group)) {
+-              ext4_error(sb, "ext4_free_blocks",
+-                         "Freeing blocks in system zones - "
++              ext4_error(sb, __func__,
++                         "Adding blocks in system zones - "
+                          "Block = %llu, count = %lu",
+                          block, count);
+               goto error_return;
+       }
+       /*
+-       * We are about to start releasing blocks in the bitmap,
++       * We are about to add blocks to the bitmap,
+        * so we need undo access.
+        */
+-      /* @@@ check errors */
+       BUFFER_TRACE(bitmap_bh, "getting undo access");
+       err = ext4_journal_get_undo_access(handle, bitmap_bh);
+       if (err)
+@@ -444,90 +447,42 @@
+       err = ext4_journal_get_write_access(handle, gd_bh);
+       if (err)
+               goto error_return;
+-
+-      jbd_lock_bh_state(bitmap_bh);
+-
+-      for (i = 0, group_freed = 0; i < count; i++) {
+-              /*
+-               * An HJ special.  This is expensive...
+-               */
+-#ifdef CONFIG_JBD2_DEBUG
+-              jbd_unlock_bh_state(bitmap_bh);
+-              {
+-                      struct buffer_head *debug_bh;
+-                      debug_bh = sb_find_get_block(sb, block + i);
+-                      if (debug_bh) {
+-                              BUFFER_TRACE(debug_bh, "Deleted!");
+-                              if (!bh2jh(bitmap_bh)->b_committed_data)
+-                                      BUFFER_TRACE(debug_bh,
+-                                              "No commited data in bitmap");
+-                              BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
+-                              __brelse(debug_bh);
+-                      }
+-              }
+-              jbd_lock_bh_state(bitmap_bh);
+-#endif
+-              if (need_resched()) {
+-                      jbd_unlock_bh_state(bitmap_bh);
+-                      cond_resched();
+-                      jbd_lock_bh_state(bitmap_bh);
+-              }
+-              /* @@@ This prevents newly-allocated data from being
+-               * freed and then reallocated within the same
+-               * transaction.
+-               *
+-               * Ideally we would want to allow that to happen, but to
+-               * do so requires making jbd2_journal_forget() capable of
+-               * revoking the queued write of a data block, which
+-               * implies blocking on the journal lock.  *forget()
+-               * cannot block due to truncate races.
+-               *
+-               * Eventually we can fix this by making jbd2_journal_forget()
+-               * return a status indicating whether or not it was able
+-               * to revoke the buffer.  On successful revoke, it is
+-               * safe not to set the allocation bit in the committed
+-               * bitmap, because we know that there is no outstanding
+-               * activity on the buffer any more and so it is safe to
+-               * reallocate it.
+-               */
+-              BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
+-              J_ASSERT_BH(bitmap_bh,
+-                              bh2jh(bitmap_bh)->b_committed_data != NULL);
+-              ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
+-                              bh2jh(bitmap_bh)->b_committed_data);
+-
+-              /*
+-               * We clear the bit in the bitmap after setting the committed
+-               * data bit, because this is the reverse order to that which
+-               * the allocator uses.
+-               */
++      /*
++       * make sure we don't allow a parallel init on other groups in the
++       * same buddy cache
++       */
++      down_write(&grp->alloc_sem);
++      for (i = 0, blocks_freed = 0; i < count; i++) {
+               BUFFER_TRACE(bitmap_bh, "clear bit");
+               if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
+                                               bit + i, bitmap_bh->b_data)) {
+-                      jbd_unlock_bh_state(bitmap_bh);
+                       ext4_error(sb, __func__,
+                                  "bit already cleared for block %llu",
+                                  (ext4_fsblk_t)(block + i));
+-                      jbd_lock_bh_state(bitmap_bh);
+                       BUFFER_TRACE(bitmap_bh, "bit already cleared");
+               } else {
+-                      group_freed++;
++                      blocks_freed++;
+               }
+       }
+-      jbd_unlock_bh_state(bitmap_bh);
+-
+       spin_lock(sb_bgl_lock(sbi, block_group));
+-      le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
++      le16_add_cpu(&desc->bg_free_blocks_count, blocks_freed);
+       desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
+       spin_unlock(sb_bgl_lock(sbi, block_group));
+-      percpu_counter_add(&sbi->s_freeblocks_counter, count);
++      percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
+       if (sbi->s_log_groups_per_flex) {
+               ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+               spin_lock(sb_bgl_lock(sbi, flex_group));
+-              sbi->s_flex_groups[flex_group].free_blocks += count;
++              sbi->s_flex_groups[flex_group].free_blocks += blocks_freed;
+               spin_unlock(sb_bgl_lock(sbi, flex_group));
+       }
++      /*
++       * request to reload the buddy with the
++       * new bitmap information
++       */
++      set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
++      ext4_mb_update_group_info(grp, blocks_freed);
++      up_write(&grp->alloc_sem);
+       /* We dirtied the bitmap block */
+       BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
+@@ -536,15 +491,10 @@
+       /* And the group descriptor block */
+       BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
+       ret = ext4_journal_dirty_metadata(handle, gd_bh);
+-      if (!err) err = ret;
+-      *pdquot_freed_blocks += group_freed;
+-
+-      if (overflow && !err) {
+-              block += count;
+-              count = overflow;
+-              goto do_more;
+-      }
++      if (!err)
++              err = ret;
+       mark_sb_dirty(sb);
++
+ error_return:
+       brelse(bitmap_bh);
+       ext4_std_error(sb, err);
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/ext4.h
++++ kernel-maemo-2.6.28.test/fs/ext4/ext4.h
+@@ -19,6 +19,7 @@
+ #include <linux/types.h>
+ #include <linux/blkdev.h>
+ #include <linux/magic.h>
++#include <linux/jbd2.h>
+ #include "ext4_i.h"
+ /*
+@@ -861,7 +862,7 @@
+ {
+       unsigned len = le16_to_cpu(dlen);
+-      if (len == EXT4_MAX_REC_LEN)
++      if (len == EXT4_MAX_REC_LEN || len == 0)
+               return 1 << 16;
+       return len;
+ }
+@@ -891,6 +892,9 @@
+ #define DX_HASH_LEGACY                0
+ #define DX_HASH_HALF_MD4      1
+ #define DX_HASH_TEA           2
++#define DX_HASH_LEGACY_UNSIGNED       3
++#define DX_HASH_HALF_MD4_UNSIGNED     4
++#define DX_HASH_TEA_UNSIGNED          5
+ #ifdef __KERNEL__
+@@ -1006,9 +1010,8 @@
+ extern int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks);
+ extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
+                       ext4_fsblk_t block, unsigned long count, int metadata);
+-extern void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
+-                              ext4_fsblk_t block, unsigned long count,
+-                              unsigned long *pdquot_freed_blocks);
++extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
++                              ext4_fsblk_t block, unsigned long count);
+ extern ext4_fsblk_t ext4_count_free_blocks(struct super_block *);
+ extern void ext4_check_blocks_bitmap(struct super_block *);
+ extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
+@@ -1054,12 +1057,13 @@
+ extern void exit_ext4_mballoc(void);
+ extern void ext4_mb_free_blocks(handle_t *, struct inode *,
+               unsigned long, unsigned long, int, unsigned long *);
+-extern int ext4_mb_add_more_groupinfo(struct super_block *sb,
++extern int ext4_mb_add_groupinfo(struct super_block *sb,
+               ext4_group_t i, struct ext4_group_desc *desc);
+ extern void ext4_mb_update_group_info(struct ext4_group_info *grp,
+               ext4_grpblk_t add);
+-
+-
++extern int ext4_mb_get_buddy_cache_lock(struct super_block *, ext4_group_t);
++extern void ext4_mb_put_buddy_cache_lock(struct super_block *,
++                                              ext4_group_t, int);
+ /* inode.c */
+ int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
+               struct buffer_head *bh, ext4_fsblk_t blocknr);
+@@ -1184,8 +1188,11 @@
+ static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
+ {
+-      return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
+-              le32_to_cpu(raw_inode->i_size_lo);
++      if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
++              return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
++                      le32_to_cpu(raw_inode->i_size_lo);
++      else
++              return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
+ }
+ static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
+@@ -1283,6 +1290,24 @@
+                       sector_t block, unsigned long max_blocks,
+                       struct buffer_head *bh, int create,
+                       int extend_disksize, int flag);
++
++/*
++ * Add new method to test whether block and inode bitmaps are properly
++ * initialized. With uninit_bg reading the block from disk is not enough
++ * to mark the bitmap uptodate. We need to also zero-out the bitmap
++ */
++#define BH_BITMAP_UPTODATE BH_JBDPrivateStart
++
++static inline int bitmap_uptodate(struct buffer_head *bh)
++{
++      return (buffer_uptodate(bh) &&
++                      test_bit(BH_BITMAP_UPTODATE, &(bh)->b_state));
++}
++static inline void set_bitmap_uptodate(struct buffer_head *bh)
++{
++      set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
++}
++
+ #endif        /* __KERNEL__ */
+ #endif        /* _EXT4_H */
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/ext4_sb.h
++++ kernel-maemo-2.6.28.test/fs/ext4/ext4_sb.h
+@@ -57,6 +57,7 @@
+       u32 s_next_generation;
+       u32 s_hash_seed[4];
+       int s_def_hash_version;
++      int s_hash_unsigned;    /* 3 if hash should be signed, 0 if not */
+       struct percpu_counter s_freeblocks_counter;
+       struct percpu_counter s_freeinodes_counter;
+       struct percpu_counter s_dirs_counter;
+@@ -101,7 +102,8 @@
+       spinlock_t s_reserve_lock;
+       spinlock_t s_md_lock;
+       tid_t s_last_transaction;
+-      unsigned short *s_mb_offsets, *s_mb_maxs;
++      unsigned short *s_mb_offsets;
++      unsigned int *s_mb_maxs;
+       /* tunables */
+       unsigned long s_stripe;
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/hash.c
++++ kernel-maemo-2.6.28.test/fs/ext4/hash.c
+@@ -35,23 +35,71 @@
+ /* The old legacy hash */
+-static __u32 dx_hack_hash(const char *name, int len)
++static __u32 dx_hack_hash_unsigned(const char *name, int len)
+ {
+-      __u32 hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
++      __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
++      const unsigned char *ucp = (const unsigned char *) name;
++
++      while (len--) {
++              hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373));
++
++              if (hash & 0x80000000)
++                      hash -= 0x7fffffff;
++              hash1 = hash0;
++              hash0 = hash;
++      }
++      return hash0 << 1;
++}
++
++static __u32 dx_hack_hash_signed(const char *name, int len)
++{
++      __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
++      const signed char *scp = (const signed char *) name;
++
+       while (len--) {
+-              __u32 hash = hash1 + (hash0 ^ (*name++ * 7152373));
++              hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373));
+-              if (hash & 0x80000000) hash -= 0x7fffffff;
++              if (hash & 0x80000000)
++                      hash -= 0x7fffffff;
+               hash1 = hash0;
+               hash0 = hash;
+       }
+-      return (hash0 << 1);
++      return hash0 << 1;
++}
++
++static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num)
++{
++      __u32   pad, val;
++      int     i;
++      const signed char *scp = (const signed char *) msg;
++
++      pad = (__u32)len | ((__u32)len << 8);
++      pad |= pad << 16;
++
++      val = pad;
++      if (len > num*4)
++              len = num * 4;
++      for (i = 0; i < len; i++) {
++              if ((i % 4) == 0)
++                      val = pad;
++              val = ((int) scp[i]) + (val << 8);
++              if ((i % 4) == 3) {
++                      *buf++ = val;
++                      val = pad;
++                      num--;
++              }
++      }
++      if (--num >= 0)
++              *buf++ = val;
++      while (--num >= 0)
++              *buf++ = pad;
+ }
+-static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
++static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num)
+ {
+       __u32   pad, val;
+       int     i;
++      const unsigned char *ucp = (const unsigned char *) msg;
+       pad = (__u32)len | ((__u32)len << 8);
+       pad |= pad << 16;
+@@ -62,7 +110,7 @@
+       for (i = 0; i < len; i++) {
+               if ((i % 4) == 0)
+                       val = pad;
+-              val = msg[i] + (val << 8);
++              val = ((int) ucp[i]) + (val << 8);
+               if ((i % 4) == 3) {
+                       *buf++ = val;
+                       val = pad;
+@@ -95,6 +143,8 @@
+       const char      *p;
+       int             i;
+       __u32           in[8], buf[4];
++      void            (*str2hashbuf)(const char *, int, __u32 *, int) =
++                              str2hashbuf_signed;
+       /* Initialize the default seed for the hash checksum functions */
+       buf[0] = 0x67452301;
+@@ -113,13 +163,18 @@
+       }
+       switch (hinfo->hash_version) {
++      case DX_HASH_LEGACY_UNSIGNED:
++              hash = dx_hack_hash_unsigned(name, len);
++              break;
+       case DX_HASH_LEGACY:
+-              hash = dx_hack_hash(name, len);
++              hash = dx_hack_hash_signed(name, len);
+               break;
++      case DX_HASH_HALF_MD4_UNSIGNED:
++              str2hashbuf = str2hashbuf_unsigned;
+       case DX_HASH_HALF_MD4:
+               p = name;
+               while (len > 0) {
+-                      str2hashbuf(p, len, in, 8);
++                      (*str2hashbuf)(p, len, in, 8);
+                       half_md4_transform(buf, in);
+                       len -= 32;
+                       p += 32;
+@@ -127,10 +182,12 @@
+               minor_hash = buf[2];
+               hash = buf[1];
+               break;
++      case DX_HASH_TEA_UNSIGNED:
++              str2hashbuf = str2hashbuf_unsigned;
+       case DX_HASH_TEA:
+               p = name;
+               while (len > 0) {
+-                      str2hashbuf(p, len, in, 4);
++                      (*str2hashbuf)(p, len, in, 4);
+                       TEA_transform(buf, in);
+                       len -= 16;
+                       p += 16;
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/ialloc.c
++++ kernel-maemo-2.6.28.test/fs/ext4/ialloc.c
+@@ -84,7 +84,7 @@
+       }
+       memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
+-      mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
++      mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
+                       bh->b_data);
+       return EXT4_INODES_PER_GROUP(sb);
+@@ -115,20 +115,40 @@
+                           block_group, bitmap_blk);
+               return NULL;
+       }
+-      if (buffer_uptodate(bh) &&
+-          !(desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
++      if (bitmap_uptodate(bh))
+               return bh;
+       lock_buffer(bh);
++      if (bitmap_uptodate(bh)) {
++              unlock_buffer(bh);
++              return bh;
++      }
+       spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+       if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+               ext4_init_inode_bitmap(sb, bh, block_group, desc);
++              set_bitmap_uptodate(bh);
+               set_buffer_uptodate(bh);
+               unlock_buffer(bh);
+               spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+               return bh;
+       }
+       spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
++      if (buffer_uptodate(bh)) {
++              /*
++               * if the group is not uninit and bh is uptodate,
++               * the bitmap is also uptodate
++               */
++              set_bitmap_uptodate(bh);
++              unlock_buffer(bh);
++              return bh;
++      }
++      /*
++       * submit the buffer_head for read. We can
++       * safely mark the bitmap as uptodate now.
++       * We do it here so the bitmap uptodate bit
++       * gets set with buffer lock held.
++       */
++      set_bitmap_uptodate(bh);
+       if (bh_submit_read(bh) < 0) {
+               put_bh(bh);
+               ext4_error(sb, __func__,
+@@ -570,6 +590,77 @@
+ }
+ /*
++ * claim the inode from the inode bitmap. If the group
++ * is uninit we need to take the groups's sb_bgl_lock
++ * and clear the uninit flag. The inode bitmap update
++ * and group desc uninit flag clear should be done
++ * after holding sb_bgl_lock so that ext4_read_inode_bitmap
++ * doesn't race with the ext4_claim_inode
++ */
++static int ext4_claim_inode(struct super_block *sb,
++                      struct buffer_head *inode_bitmap_bh,
++                      unsigned long ino, ext4_group_t group, int mode)
++{
++      int free = 0, retval = 0;
++      struct ext4_sb_info *sbi = EXT4_SB(sb);
++      struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
++
++      spin_lock(sb_bgl_lock(sbi, group));
++      if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
++              /* not a free inode */
++              retval = 1;
++              goto err_ret;
++      }
++      ino++;
++      if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
++                      ino > EXT4_INODES_PER_GROUP(sb)) {
++              spin_unlock(sb_bgl_lock(sbi, group));
++              ext4_error(sb, __func__,
++                         "reserved inode or inode > inodes count - "
++                         "block_group = %lu, inode=%lu", group,
++                         ino + group * EXT4_INODES_PER_GROUP(sb));
++              return 1;
++      }
++      /* If we didn't allocate from within the initialized part of the inode
++       * table then we need to initialize up to this inode. */
++      if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
++
++              if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
++                      gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
++                      /* When marking the block group with
++                       * ~EXT4_BG_INODE_UNINIT we don't want to depend
++                       * on the value of bg_itable_unused even though
++                       * mke2fs could have initialized the same for us.
++                       * Instead we calculated the value below
++                       */
++
++                      free = 0;
++              } else {
++                      free = EXT4_INODES_PER_GROUP(sb) -
++                              le16_to_cpu(gdp->bg_itable_unused);
++              }
++
++              /*
++               * Check the relative inode number against the last used
++               * relative inode number in this group. If it is greater
++               * we need to update the bg_itable_unused count
++               *
++               */
++              if (ino > free)
++                      gdp->bg_itable_unused =
++                              cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
++      }
++      le16_add_cpu(&gdp->bg_free_inodes_count, -1);
++      if (S_ISDIR(mode)) {
++              le16_add_cpu(&gdp->bg_used_dirs_count, 1);
++      }
++      gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
++err_ret:
++      spin_unlock(sb_bgl_lock(sbi, group));
++      return retval;
++}
++
++/*
+  * There are two policies for allocating an inode.  If the new inode is
+  * a directory, then a forward search is made for a block group with both
+  * free space and a low directory-to-inode ratio; if that fails, then of
+@@ -612,6 +703,13 @@
+       if (sbi->s_log_groups_per_flex) {
+               ret2 = find_group_flex(sb, dir, &group);
++              if (ret2 == -1) {
++                      ret2 = find_group_other(sb, dir, &group);
++                      if (ret2 == 0 && printk_ratelimit())
++                              printk(KERN_NOTICE "ext4: find_group_flex "
++                                     "failed, fallback succeeded dir %lu\n",
++                                     dir->i_ino);
++              }
+               goto got_group;
+       }
+@@ -652,8 +750,12 @@
+                       if (err)
+                               goto fail;
+-                      if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
+-                                              ino, bitmap_bh->b_data)) {
++                      BUFFER_TRACE(bh2, "get_write_access");
++                      err = ext4_journal_get_write_access(handle, bh2);
++                      if (err)
++                              goto fail;
++                      if (!ext4_claim_inode(sb, bitmap_bh,
++                                              ino, group, mode)) {
+                               /* we won it */
+                               BUFFER_TRACE(bitmap_bh,
+                                       "call ext4_journal_dirty_metadata");
+@@ -661,10 +763,13 @@
+                                                               bitmap_bh);
+                               if (err)
+                                       goto fail;
++                              /* zero bit is inode number 1*/
++                              ino++;
+                               goto got;
+                       }
+                       /* we lost it */
+                       jbd2_journal_release_buffer(handle, bitmap_bh);
++                      jbd2_journal_release_buffer(handle, bh2);
+                       if (++ino < EXT4_INODES_PER_GROUP(sb))
+                               goto repeat_in_this_group;
+@@ -684,21 +789,6 @@
+       goto out;
+ got:
+-      ino++;
+-      if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
+-          ino > EXT4_INODES_PER_GROUP(sb)) {
+-              ext4_error(sb, __func__,
+-                         "reserved inode or inode > inodes count - "
+-                         "block_group = %lu, inode=%lu", group,
+-                         ino + group * EXT4_INODES_PER_GROUP(sb));
+-              err = -EIO;
+-              goto fail;
+-      }
+-
+-      BUFFER_TRACE(bh2, "get_write_access");
+-      err = ext4_journal_get_write_access(handle, bh2);
+-      if (err) goto fail;
+-
+       /* We may have to initialize the block bitmap if it isn't already */
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
+           gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+@@ -733,47 +823,10 @@
+               if (err)
+                       goto fail;
+       }
+-
+-      spin_lock(sb_bgl_lock(sbi, group));
+-      /* If we didn't allocate from within the initialized part of the inode
+-       * table then we need to initialize up to this inode. */
+-      if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
+-              if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+-                      gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
+-
+-                      /* When marking the block group with
+-                       * ~EXT4_BG_INODE_UNINIT we don't want to depend
+-                       * on the value of bg_itable_unused even though
+-                       * mke2fs could have initialized the same for us.
+-                       * Instead we calculated the value below
+-                       */
+-
+-                      free = 0;
+-              } else {
+-                      free = EXT4_INODES_PER_GROUP(sb) -
+-                              le16_to_cpu(gdp->bg_itable_unused);
+-              }
+-
+-              /*
+-               * Check the relative inode number against the last used
+-               * relative inode number in this group. if it is greater
+-               * we need to  update the bg_itable_unused count
+-               *
+-               */
+-              if (ino > free)
+-                      gdp->bg_itable_unused =
+-                              cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
+-      }
+-
+-      le16_add_cpu(&gdp->bg_free_inodes_count, -1);
+-      if (S_ISDIR(mode)) {
+-              le16_add_cpu(&gdp->bg_used_dirs_count, 1);
+-      }
+-      gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+-      spin_unlock(sb_bgl_lock(sbi, group));
+-      BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
++      BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
+       err = ext4_journal_dirty_metadata(handle, bh2);
+-      if (err) goto fail;
++      if (err)
++              goto fail;
+       percpu_counter_dec(&sbi->s_freeinodes_counter);
+       if (S_ISDIR(mode))
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/inode.c
++++ kernel-maemo-2.6.28.test/fs/ext4/inode.c
+@@ -46,8 +46,10 @@
+ static inline int ext4_begin_ordered_truncate(struct inode *inode,
+                                             loff_t new_size)
+ {
+-      return jbd2_journal_begin_ordered_truncate(&EXT4_I(inode)->jinode,
+-                                                 new_size);
++      return jbd2_journal_begin_ordered_truncate(
++                                      EXT4_SB(inode->i_sb)->s_journal,
++                                      &EXT4_I(inode)->jinode,
++                                      new_size);
+ }
+ static void ext4_invalidatepage(struct page *page, unsigned long offset);
+@@ -351,9 +353,9 @@
+               final = ptrs;
+       } else {
+               ext4_warning(inode->i_sb, "ext4_block_to_path",
+-                              "block %lu > max",
++                              "block %lu > max in inode %lu",
+                               i_block + direct_blocks +
+-                              indirect_blocks + double_blocks);
++                              indirect_blocks + double_blocks, inode->i_ino);
+       }
+       if (boundary)
+               *boundary = final - 1 - (i_block & (ptrs - 1));
+@@ -1345,7 +1347,11 @@
+               goto out;
+       }
+-      page = __grab_cache_page(mapping, index);
++      /* We cannot recurse into the filesystem as the transaction is already
++       * started */
++      flags |= AOP_FLAG_NOFS;
++
++      page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page) {
+               ext4_journal_stop(handle);
+               ret = -ENOMEM;
+@@ -1354,7 +1360,7 @@
+       *pagep = page;
+       ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+-                                                      ext4_get_block);
++                              ext4_get_block);
+       if (!ret && ext4_should_journal_data(inode)) {
+               ret = walk_page_buffers(handle, page_buffers(page),
+@@ -1644,35 +1650,39 @@
+  */
+ static int mpage_da_submit_io(struct mpage_da_data *mpd)
+ {
+-      struct address_space *mapping = mpd->inode->i_mapping;
+-      int ret = 0, err, nr_pages, i;
+-      unsigned long index, end;
+-      struct pagevec pvec;
+       long pages_skipped;
++      struct pagevec pvec;
++      unsigned long index, end;
++      int ret = 0, err, nr_pages, i;
++      struct inode *inode = mpd->inode;
++      struct address_space *mapping = inode->i_mapping;
+       BUG_ON(mpd->next_page <= mpd->first_page);
+-      pagevec_init(&pvec, 0);
++      /*
++       * We need to start from the first_page to the next_page - 1
++       * to make sure we also write the mapped dirty buffer_heads.
++       * If we look at mpd->lbh.b_blocknr we would only be looking
++       * at the currently mapped buffer_heads.
++       */
+       index = mpd->first_page;
+       end = mpd->next_page - 1;
++      pagevec_init(&pvec, 0);
+       while (index <= end) {
+-              /*
+-               * We can use PAGECACHE_TAG_DIRTY lookup here because
+-               * even though we have cleared the dirty flag on the page
+-               * We still keep the page in the radix tree with tag
+-               * PAGECACHE_TAG_DIRTY. See clear_page_dirty_for_io.
+-               * The PAGECACHE_TAG_DIRTY is cleared in set_page_writeback
+-               * which is called via the below writepage callback.
+-               */
+-              nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+-                                      PAGECACHE_TAG_DIRTY,
+-                                      min(end - index,
+-                                      (pgoff_t)PAGEVEC_SIZE-1) + 1);
++              nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
+               if (nr_pages == 0)
+                       break;
+               for (i = 0; i < nr_pages; i++) {
+                       struct page *page = pvec.pages[i];
++                      index = page->index;
++                      if (index > end)
++                              break;
++                      index++;
++
++                      BUG_ON(!PageLocked(page));
++                      BUG_ON(PageWriteback(page));
++
+                       pages_skipped = mpd->wbc->pages_skipped;
+                       err = mapping->a_ops->writepage(page, mpd->wbc);
+                       if (!err && (pages_skipped == mpd->wbc->pages_skipped))
+@@ -2086,11 +2096,29 @@
+               bh = head;
+               do {
+                       BUG_ON(buffer_locked(bh));
++                      /*
++                       * We need to try to allocate
++                       * unmapped blocks in the same page.
++                       * Otherwise we won't make progress
++                       * with the page in ext4_da_writepage
++                       */
+                       if (buffer_dirty(bh) &&
+                               (!buffer_mapped(bh) || buffer_delay(bh))) {
+                               mpage_add_bh_to_extent(mpd, logical, bh);
+                               if (mpd->io_done)
+                                       return MPAGE_DA_EXTENT_TAIL;
++                      } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
++                              /*
++                               * mapped dirty buffer. We need to update
++                               * the b_state because we look at
++                               * b_state in mpage_da_map_blocks. We don't
++                               * update b_size because if we find an
++                               * unmapped buffer_head later we need to
++                               * use the b_state flag of that buffer_head.
++                               */
++                              if (mpd->lbh.b_size == 0)
++                                      mpd->lbh.b_state =
++                                              bh->b_state & BH_FLAGS;
+                       }
+                       logical++;
+               } while ((bh = bh->b_this_page) != head);
+@@ -2378,6 +2406,7 @@
+       struct inode *inode = mapping->host;
+       int no_nrwrite_index_update;
+       long pages_written = 0, pages_skipped;
++      int range_cyclic, cycled = 1, io_done = 0;
+       int needed_blocks, ret = 0, nr_to_writebump = 0;
+       struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
+@@ -2388,6 +2417,20 @@
+        */
+       if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+               return 0;
++
++      /*
++       * If the filesystem has aborted, it is read-only, so return
++       * right away instead of dumping stack traces later on that
++       * will obscure the real source of the problem.  We test
++       * EXT4_MOUNT_ABORT instead of sb->s_flag's MS_RDONLY because
++       * the latter could be true if the filesystem is mounted
++       * read-only, and in that case, ext4_da_writepages should
++       * *never* be called, so if that ever happens, we would want
++       * the stack trace.
++       */
++      if (unlikely(sbi->s_mount_opt & EXT4_MOUNT_ABORT))
++              return -EROFS;
++
+       /*
+        * Make sure nr_to_write is >= sbi->s_mb_stream_request
+        * This make sure small files blocks are allocated in
+@@ -2401,9 +2444,15 @@
+       if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+               range_whole = 1;
+-      if (wbc->range_cyclic)
++      range_cyclic = wbc->range_cyclic;
++      if (wbc->range_cyclic) {
+               index = mapping->writeback_index;
+-      else
++              if (index)
++                      cycled = 0;
++              wbc->range_start = index << PAGE_CACHE_SHIFT;
++              wbc->range_end  = LLONG_MAX;
++              wbc->range_cyclic = 0;
++      } else
+               index = wbc->range_start >> PAGE_CACHE_SHIFT;
+       mpd.wbc = wbc;
+@@ -2417,6 +2466,7 @@
+       wbc->no_nrwrite_index_update = 1;
+       pages_skipped = wbc->pages_skipped;
++retry:
+       while (!ret && wbc->nr_to_write > 0) {
+               /*
+@@ -2432,7 +2482,7 @@
+               handle = ext4_journal_start(inode, needed_blocks);
+               if (IS_ERR(handle)) {
+                       ret = PTR_ERR(handle);
+-                      printk(KERN_EMERG "%s: jbd2_start: "
++                      printk(KERN_CRIT "%s: jbd2_start: "
+                              "%ld pages, ino %lu; err %d\n", __func__,
+                               wbc->nr_to_write, inode->i_ino, ret);
+                       dump_stack();
+@@ -2459,6 +2509,7 @@
+                       pages_written += mpd.pages_written;
+                       wbc->pages_skipped = pages_skipped;
+                       ret = 0;
++                      io_done = 1;
+               } else if (wbc->nr_to_write)
+                       /*
+                        * There is no more writeout needed
+@@ -2467,6 +2518,13 @@
+                        */
+                       break;
+       }
++      if (!io_done && !cycled) {
++              cycled = 1;
++              index = 0;
++              wbc->range_start = index << PAGE_CACHE_SHIFT;
++              wbc->range_end  = mapping->writeback_index - 1;
++              goto retry;
++      }
+       if (pages_skipped != wbc->pages_skipped)
+               printk(KERN_EMERG "This should not happen leaving %s "
+                               "with nr_to_write = %ld ret = %d\n",
+@@ -2474,6 +2532,7 @@
+       /* Update index */
+       index += pages_written;
++      wbc->range_cyclic = range_cyclic;
+       if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+               /*
+                * set the writeback_index so that range_cyclic
+@@ -2548,8 +2607,11 @@
+               ret = PTR_ERR(handle);
+               goto out;
+       }
++      /* We cannot recurse into the filesystem as the transaction is already
++       * started */
++      flags |= AOP_FLAG_NOFS;
+-      page = __grab_cache_page(mapping, index);
++      page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page) {
+               ext4_journal_stop(handle);
+               ret = -ENOMEM;
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/mballoc.c
++++ kernel-maemo-2.6.28.test/fs/ext4/mballoc.c
+@@ -100,7 +100,7 @@
+  * inode as:
+  *
+  *  {                        page                        }
+- *  [ group 0 buddy][ group 0 bitmap] [group 1][ group 1]...
++ *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
+  *
+  *
+  * one block each for bitmap and buddy information.  So for each group we
+@@ -330,6 +330,18 @@
+  *        object
+  *
+  */
++static struct kmem_cache *ext4_pspace_cachep;
++static struct kmem_cache *ext4_ac_cachep;
++static struct kmem_cache *ext4_free_ext_cachep;
++static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
++                                      ext4_group_t group);
++static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
++                                              ext4_group_t group);
++static int ext4_mb_init_per_dev_proc(struct super_block *sb);
++static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
++static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
++
++
+ static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
+ {
+@@ -716,7 +728,7 @@
+  * stored in the inode as
+  *
+  * {                        page                        }
+- * [ group 0 buddy][ group 0 bitmap] [group 1][ group 1]...
++ * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
+  *
+  *
+  * one block each for bitmap and buddy information.
+@@ -782,22 +794,42 @@
+               if (bh[i] == NULL)
+                       goto out;
+-              if (buffer_uptodate(bh[i]) &&
+-                  !(desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))
++              if (bitmap_uptodate(bh[i]))
+                       continue;
+               lock_buffer(bh[i]);
++              if (bitmap_uptodate(bh[i])) {
++                      unlock_buffer(bh[i]);
++                      continue;
++              }
+               spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+               if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+                       ext4_init_block_bitmap(sb, bh[i],
+                                               first_group + i, desc);
++                      set_bitmap_uptodate(bh[i]);
+                       set_buffer_uptodate(bh[i]);
+                       unlock_buffer(bh[i]);
+                       spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+                       continue;
+               }
+               spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
++              if (buffer_uptodate(bh[i])) {
++                      /*
++                       * if the group is not uninit and bh is uptodate,
++                       * the bitmap is also uptodate
++                       */
++                      set_bitmap_uptodate(bh[i]);
++                      unlock_buffer(bh[i]);
++                      continue;
++              }
+               get_bh(bh[i]);
++              /*
++               * submit the buffer_head for read. We can
++               * safely mark the bitmap as uptodate now.
++               * We do it here so the bitmap uptodate bit
++               * gets set with buffer lock held.
++               */
++              set_bitmap_uptodate(bh[i]);
+               bh[i]->b_end_io = end_buffer_read_sync;
+               submit_bh(READ, bh[i]);
+               mb_debug("read bitmap for group %lu\n", first_group + i);
+@@ -814,6 +846,8 @@
+       err = 0;
+       first_block = page->index * blocks_per_page;
++      /* init the page  */
++      memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
+       for (i = 0; i < blocks_per_page; i++) {
+               int group;
+               struct ext4_group_info *grinfo;
+@@ -840,7 +874,6 @@
+                       BUG_ON(incore == NULL);
+                       mb_debug("put buddy for group %u in page %lu/%x\n",
+                               group, page->index, i * blocksize);
+-                      memset(data, 0xff, blocksize);
+                       grinfo = ext4_get_group_info(sb, group);
+                       grinfo->bb_fragments = 0;
+                       memset(grinfo->bb_counters, 0,
+@@ -848,7 +881,9 @@
+                       /*
+                        * incore got set to the group block bitmap below
+                        */
++                      ext4_lock_group(sb, group);
+                       ext4_mb_generate_buddy(sb, data, incore, group);
++                      ext4_unlock_group(sb, group);
+                       incore = NULL;
+               } else {
+                       /* this is block of bitmap */
+@@ -862,6 +897,7 @@
+                       /* mark all preallocated blks used in in-core bitmap */
+                       ext4_mb_generate_from_pa(sb, data, group);
++                      ext4_mb_generate_from_freelist(sb, data, group);
+                       ext4_unlock_group(sb, group);
+                       /* set incore so that the buddy information can be
+@@ -886,18 +922,20 @@
+ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+                                       struct ext4_buddy *e4b)
+ {
+-      struct ext4_sb_info *sbi = EXT4_SB(sb);
+-      struct inode *inode = sbi->s_buddy_cache;
+       int blocks_per_page;
+       int block;
+       int pnum;
+       int poff;
+       struct page *page;
+       int ret;
++      struct ext4_group_info *grp;
++      struct ext4_sb_info *sbi = EXT4_SB(sb);
++      struct inode *inode = sbi->s_buddy_cache;
+       mb_debug("load group %lu\n", group);
+       blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
++      grp = ext4_get_group_info(sb, group);
+       e4b->bd_blkbits = sb->s_blocksize_bits;
+       e4b->bd_info = ext4_get_group_info(sb, group);
+@@ -905,6 +943,15 @@
+       e4b->bd_group = group;
+       e4b->bd_buddy_page = NULL;
+       e4b->bd_bitmap_page = NULL;
++      e4b->alloc_semp = &grp->alloc_sem;
++
++      /* Take the read lock on the group alloc
++       * sem. This would make sure a parallel
++       * ext4_mb_init_group happening on other
++       * groups mapped by the page is blocked
++       * till we are done with allocation
++       */
++      down_read(e4b->alloc_semp);
+       /*
+        * the buddy cache inode stores the block bitmap
+@@ -920,6 +967,14 @@
+       page = find_get_page(inode->i_mapping, pnum);
+       if (page == NULL || !PageUptodate(page)) {
+               if (page)
++                      /*
++                       * drop the page reference and try
++                       * to get the page with lock. If we
++                       * are not uptodate that implies
++                       * somebody just created the page but
++                       * is yet to initialize the same. So
++                       * wait for it to initialize.
++                       */
+                       page_cache_release(page);
+               page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
+               if (page) {
+@@ -985,6 +1040,9 @@
+               page_cache_release(e4b->bd_buddy_page);
+       e4b->bd_buddy = NULL;
+       e4b->bd_bitmap = NULL;
++
++      /* Done with the buddy cache */
++      up_read(e4b->alloc_semp);
+       return ret;
+ }
+@@ -994,6 +1052,9 @@
+               page_cache_release(e4b->bd_bitmap_page);
+       if (e4b->bd_buddy_page)
+               page_cache_release(e4b->bd_buddy_page);
++      /* Done with the buddy cache */
++      if (e4b->alloc_semp)
++              up_read(e4b->alloc_semp);
+ }
+@@ -1031,7 +1092,10 @@
+                       cur += 32;
+                       continue;
+               }
+-              mb_clear_bit_atomic(lock, cur, bm);
++              if (lock)
++                      mb_clear_bit_atomic(lock, cur, bm);
++              else
++                      mb_clear_bit(cur, bm);
+               cur++;
+       }
+ }
+@@ -1049,7 +1113,10 @@
+                       cur += 32;
+                       continue;
+               }
+-              mb_set_bit_atomic(lock, cur, bm);
++              if (lock)
++                      mb_set_bit_atomic(lock, cur, bm);
++              else
++                      mb_set_bit(cur, bm);
+               cur++;
+       }
+ }
+@@ -1296,13 +1363,20 @@
+       ac->ac_tail = ret & 0xffff;
+       ac->ac_buddy = ret >> 16;
+-      /* XXXXXXX: SUCH A HORRIBLE **CK */
+-      /*FIXME!! Why ? */
++      /*
++       * take the page reference. We want the page to be pinned
++       * so that we don't get a ext4_mb_init_cache_call for this
++       * group until we update the bitmap. That would mean we
++       * double allocate blocks. The reference is dropped
++       * in ext4_mb_release_context
++       */
+       ac->ac_bitmap_page = e4b->bd_bitmap_page;
+       get_page(ac->ac_bitmap_page);
+       ac->ac_buddy_page = e4b->bd_buddy_page;
+       get_page(ac->ac_buddy_page);
+-
++      /* on allocation we use ac to track the held semaphore */
++      ac->alloc_semp =  e4b->alloc_semp;
++      e4b->alloc_semp = NULL;
+       /* store last allocated for subsequent stream allocation */
+       if ((ac->ac_flags & EXT4_MB_HINT_DATA)) {
+               spin_lock(&sbi->s_md_lock);
+@@ -1326,6 +1400,8 @@
+       struct ext4_free_extent ex;
+       int max;
++      if (ac->ac_status == AC_STATUS_FOUND)
++              return;
+       /*
+        * We don't want to scan for a whole year
+        */
+@@ -1692,6 +1768,173 @@
+       return 0;
+ }
++/*
++ * lock the group_info alloc_sem of all the groups
++ * belonging to the same buddy cache page. This
++ * make sure other parallel operation on the buddy
++ * cache doesn't happen  whild holding the buddy cache
++ * lock
++ */
++int ext4_mb_get_buddy_cache_lock(struct super_block *sb, ext4_group_t group)
++{
++      int i;
++      int block, pnum;
++      int blocks_per_page;
++      int groups_per_page;
++      ext4_group_t first_group;
++      struct ext4_group_info *grp;
++
++      blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
++      /*
++       * the buddy cache inode stores the block bitmap
++       * and buddy information in consecutive blocks.
++       * So for each group we need two blocks.
++       */
++      block = group * 2;
++      pnum = block / blocks_per_page;
++      first_group = pnum * blocks_per_page / 2;
++
++      groups_per_page = blocks_per_page >> 1;
++      if (groups_per_page == 0)
++              groups_per_page = 1;
++      /* read all groups the page covers into the cache */
++      for (i = 0; i < groups_per_page; i++) {
++
++              if ((first_group + i) >= EXT4_SB(sb)->s_groups_count)
++                      break;
++              grp = ext4_get_group_info(sb, first_group + i);
++              /* take each group's write allocation
++               * semaphore. This makes sure there is
++               * no block allocation going on in any
++               * of those groups
++               */
++              down_write(&grp->alloc_sem);
++      }
++      return i;
++}
++
++void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
++                                      ext4_group_t group, int locked_group)
++{
++      int i;
++      int block, pnum;
++      int blocks_per_page;
++      ext4_group_t first_group;
++      struct ext4_group_info *grp;
++
++      blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
++      /*
++       * the buddy cache inode stores the block bitmap
++       * and buddy information in consecutive blocks.
++       * So for each group we need two blocks.
++       */
++      block = group * 2;
++      pnum = block / blocks_per_page;
++      first_group = pnum * blocks_per_page / 2;
++      /* release locks on all the groups */
++      for (i = 0; i < locked_group; i++) {
++
++              grp = ext4_get_group_info(sb, first_group + i);
++              /* release each group's write allocation
++               * semaphore, allowing block allocation
++               * to proceed again in those groups
++               */
++              up_write(&grp->alloc_sem);
++      }
++
++}
++
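To make the arithmetic in the two helpers above concrete: each group owns two consecutive blocks in the buddy-cache inode (bitmap first, then buddy), so the page covering group G and the first group sharing that page follow directly from the blocksize. A standalone sketch, not kernel code; the page and block sizes are just example values.

#include <stdio.h>

/* mirrors the group -> block -> page -> first_group mapping used above */
static void show(unsigned long group, int page_size, int blocksize)
{
    int blocks_per_page = page_size / blocksize;
    int groups_per_page = blocks_per_page >> 1;
    unsigned long block = group * 2;                     /* bitmap block */
    unsigned long pnum = block / blocks_per_page;        /* buddy-cache page */
    unsigned long first_group = pnum * blocks_per_page / 2;

    if (groups_per_page == 0)
        groups_per_page = 1;

    printf("blocksize %4d: group %3lu -> page %3lu, groups %lu..%lu share it\n",
           blocksize, group, pnum, first_group,
           first_group + groups_per_page - 1);
}

int main(void)
{
    show(5, 4096, 1024);  /* 4 blocks/page: groups 4..5 share page 2 */
    show(5, 4096, 4096);  /* 1 block/page: group 5's bitmap sits alone on page 10 */
    return 0;
}
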
++static int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
++{
++
++      int ret;
++      void *bitmap;
++      int blocks_per_page;
++      int block, pnum, poff;
++      int num_grp_locked = 0;
++      struct ext4_group_info *this_grp;
++      struct ext4_sb_info *sbi = EXT4_SB(sb);
++      struct inode *inode = sbi->s_buddy_cache;
++      struct page *page = NULL, *bitmap_page = NULL;
++
++      mb_debug("init group %lu\n", group);
++      blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
++      this_grp = ext4_get_group_info(sb, group);
++      /*
++       * This ensures we don't add a group
++       * to this buddy cache via resize
++       */
++      num_grp_locked =  ext4_mb_get_buddy_cache_lock(sb, group);
++      if (!EXT4_MB_GRP_NEED_INIT(this_grp)) {
++              /*
++               * somebody else initialized the group;
++               * return without doing anything
++               */
++              ret = 0;
++              goto err;
++      }
++      /*
++       * the buddy cache inode stores the block bitmap
++       * and buddy information in consecutive blocks.
++       * So for each group we need two blocks.
++       */
++      block = group * 2;
++      pnum = block / blocks_per_page;
++      poff = block % blocks_per_page;
++      page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
++      if (page) {
++              BUG_ON(page->mapping != inode->i_mapping);
++              ret = ext4_mb_init_cache(page, NULL);
++              if (ret) {
++                      unlock_page(page);
++                      goto err;
++              }
++              unlock_page(page);
++      }
++      if (page == NULL || !PageUptodate(page)) {
++              ret = -EIO;
++              goto err;
++      }
++      mark_page_accessed(page);
++      bitmap_page = page;
++      bitmap = page_address(page) + (poff * sb->s_blocksize);
++
++      /* init buddy cache */
++      block++;
++      pnum = block / blocks_per_page;
++      poff = block % blocks_per_page;
++      page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
++      if (page == bitmap_page) {
++              /*
++               * If both the bitmap and buddy are in
++               * the same page we don't need to force
++               * init the buddy
++               */
++              unlock_page(page);
++      } else if (page) {
++              BUG_ON(page->mapping != inode->i_mapping);
++              ret = ext4_mb_init_cache(page, bitmap);
++              if (ret) {
++                      unlock_page(page);
++                      goto err;
++              }
++              unlock_page(page);
++      }
++      if (page == NULL || !PageUptodate(page)) {
++              ret = -EIO;
++              goto err;
++      }
++      mark_page_accessed(page);
++err:
++      ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked);
++      if (bitmap_page)
++              page_cache_release(bitmap_page);
++      if (page)
++              page_cache_release(page);
++      return ret;
++}
++
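The "page == bitmap_page" branch in ext4_mb_init_group() above fires exactly when a group's bitmap block (2*G) and buddy block (2*G + 1) land in the same buddy-cache page, i.e. whenever more than one filesystem block fits in a page. A standalone check of that condition, for illustration only and with assumed sizes:

#include <stdio.h>

int main(void)
{
    int page_size = 4096;
    int blocksizes[] = { 1024, 2048, 4096 };

    for (int i = 0; i < 3; i++) {
        int blocks_per_page = page_size / blocksizes[i];
        unsigned long group = 7;
        unsigned long bitmap_pnum = (group * 2) / blocks_per_page;
        unsigned long buddy_pnum  = (group * 2 + 1) / blocks_per_page;

        printf("blocksize %4d: bitmap page %2lu, buddy page %2lu -> %s\n",
               blocksizes[i], bitmap_pnum, buddy_pnum,
               bitmap_pnum == buddy_pnum ? "same page, second init skipped"
                                         : "separate pages");
    }
    return 0;
}
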
+ static noinline_for_stack int
+ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ {
+@@ -1775,7 +2018,7 @@
+                               group = 0;
+                       /* quick check to skip empty groups */
+-                      grp = ext4_get_group_info(ac->ac_sb, group);
++                      grp = ext4_get_group_info(sb, group);
+                       if (grp->bb_free == 0)
+                               continue;
+@@ -1788,10 +2031,9 @@
+                                * we need full data about the group
+                                * to make a good selection
+                                */
+-                              err = ext4_mb_load_buddy(sb, group, &e4b);
++                              err = ext4_mb_init_group(sb, group);
+                               if (err)
+                                       goto out;
+-                              ext4_mb_release_desc(&e4b);
+                       }
+                       /*
+@@ -2300,6 +2542,7 @@
+       }
+       INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
++      init_rwsem(&meta_group_info[i]->alloc_sem);
+       meta_group_info[i]->bb_free_root.rb_node = NULL;;
+ #ifdef DOUBLE_CHECK
+@@ -2327,54 +2570,6 @@
+ } /* ext4_mb_add_groupinfo */
+ /*
+- * Add a group to the existing groups.
+- * This function is used for online resize
+- */
+-int ext4_mb_add_more_groupinfo(struct super_block *sb, ext4_group_t group,
+-                             struct ext4_group_desc *desc)
+-{
+-      struct ext4_sb_info *sbi = EXT4_SB(sb);
+-      struct inode *inode = sbi->s_buddy_cache;
+-      int blocks_per_page;
+-      int block;
+-      int pnum;
+-      struct page *page;
+-      int err;
+-
+-      /* Add group based on group descriptor*/
+-      err = ext4_mb_add_groupinfo(sb, group, desc);
+-      if (err)
+-              return err;
+-
+-      /*
+-       * Cache pages containing dynamic mb_alloc datas (buddy and bitmap
+-       * datas) are set not up to date so that they will be re-initilaized
+-       * during the next call to ext4_mb_load_buddy
+-       */
+-
+-      /* Set buddy page as not up to date */
+-      blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+-      block = group * 2;
+-      pnum = block / blocks_per_page;
+-      page = find_get_page(inode->i_mapping, pnum);
+-      if (page != NULL) {
+-              ClearPageUptodate(page);
+-              page_cache_release(page);
+-      }
+-
+-      /* Set bitmap page as not up to date */
+-      block++;
+-      pnum = block / blocks_per_page;
+-      page = find_get_page(inode->i_mapping, pnum);
+-      if (page != NULL) {
+-              ClearPageUptodate(page);
+-              page_cache_release(page);
+-      }
+-
+-      return 0;
+-}
+-
+-/*
+  * Update an existing group.
+  * This function is used for online resize
+  */
+@@ -2493,6 +2688,8 @@
+       if (sbi->s_mb_offsets == NULL) {
+               return -ENOMEM;
+       }
++
++      i = (sb->s_blocksize_bits + 2) * sizeof(unsigned int);
+       sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
+       if (sbi->s_mb_maxs == NULL) {
+               kfree(sbi->s_mb_maxs);
+@@ -2843,8 +3040,8 @@
+           in_range(block + len - 1, ext4_inode_table(sb, gdp),
+                    EXT4_SB(sb)->s_itb_per_group)) {
+               ext4_error(sb, __func__,
+-                         "Allocating block in system zone - block = %llu",
+-                         block);
++                         "Allocating block %llu in system zone of %d group\n",
++                         block, ac->ac_b_ex.fe_group);
+               /* File system mounted not to panic on error
+                * Fix the bitmap and repeat the block allocation
+                * We leak some of the blocks here.
+@@ -2866,10 +3063,9 @@
+               }
+       }
+ #endif
+-      mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
+-                              ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
+-
+       spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
++      mb_set_bits(NULL, bitmap_bh->b_data,
++                              ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
+       if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+               gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
+               gdp->bg_free_blocks_count =
+@@ -3307,6 +3503,32 @@
+ }
+ /*
++ * the function goes through all blocks freed in the group
++ * but not yet committed and marks them used in the in-core bitmap.
++ * The buddy must be generated from this bitmap.
++ * Needs to be called with the ext4 group lock held (ext4_lock_group)
++ */
++static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
++                                              ext4_group_t group)
++{
++      struct rb_node *n;
++      struct ext4_group_info *grp;
++      struct ext4_free_data *entry;
++
++      grp = ext4_get_group_info(sb, group);
++      n = rb_first(&(grp->bb_free_root));
++
++      while (n) {
++              entry = rb_entry(n, struct ext4_free_data, node);
++              mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
++                              bitmap, entry->start_blk,
++                              entry->count);
++              n = rb_next(n);
++      }
++      return;
++}
++
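ext4_mb_generate_from_freelist() above replays the group's "freed but not yet committed" extents into the in-core bitmap, so a buddy built from that bitmap cannot hand those blocks out again before the transaction commits. A standalone model of that replay follows; a sorted array stands in for the kernel rb-tree and nothing here is kernel code.

#include <stdio.h>
#include <string.h>

struct free_extent { unsigned start_blk, count; };

/* mark [start, start + count) as in use in a byte-array bitmap */
static void set_range(unsigned char *bm, unsigned start, unsigned count)
{
    for (unsigned b = start; b < start + count; b++)
        bm[b / 8] |= 1u << (b % 8);
}

int main(void)
{
    unsigned char bitmap[16];
    struct free_extent pending[] = { { 3, 2 }, { 10, 4 }, { 40, 1 } };

    memset(bitmap, 0, sizeof(bitmap));
    for (unsigned i = 0; i < sizeof(pending) / sizeof(pending[0]); i++)
        set_range(bitmap, pending[i].start_blk, pending[i].count);

    printf("first byte of bitmap: %#x\n", bitmap[0]); /* bits 3,4 -> 0x18 */
    return 0;
}
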
++/*
+  * the function goes through all preallocation in this group and marks them
+  * used in in-core bitmap. buddy must be generated from this bitmap
+  * Need to be called with ext4 group lock (ext4_lock_group)
+@@ -3468,6 +3690,8 @@
+       pa->pa_free = pa->pa_len;
+       atomic_set(&pa->pa_count, 1);
+       spin_lock_init(&pa->pa_lock);
++      INIT_LIST_HEAD(&pa->pa_inode_list);
++      INIT_LIST_HEAD(&pa->pa_group_list);
+       pa->pa_deleted = 0;
+       pa->pa_linear = 0;
+@@ -3526,6 +3750,7 @@
+       atomic_set(&pa->pa_count, 1);
+       spin_lock_init(&pa->pa_lock);
+       INIT_LIST_HEAD(&pa->pa_inode_list);
++      INIT_LIST_HEAD(&pa->pa_group_list);
+       pa->pa_deleted = 0;
+       pa->pa_linear = 1;
+@@ -4068,6 +4293,7 @@
+       ac->ac_pa = NULL;
+       ac->ac_bitmap_page = NULL;
+       ac->ac_buddy_page = NULL;
++      ac->alloc_semp = NULL;
+       ac->ac_lg = NULL;
+       /* we have to define context: we'll we work with a file or
+@@ -4233,18 +4459,23 @@
+                       pa->pa_free -= ac->ac_b_ex.fe_len;
+                       pa->pa_len -= ac->ac_b_ex.fe_len;
+                       spin_unlock(&pa->pa_lock);
+-                      /*
+-                       * We want to add the pa to the right bucket.
+-                       * Remove it from the list and while adding
+-                       * make sure the list to which we are adding
+-                       * doesn't grow big.
+-                       */
+-                      if (likely(pa->pa_free)) {
+-                              spin_lock(pa->pa_obj_lock);
+-                              list_del_rcu(&pa->pa_inode_list);
+-                              spin_unlock(pa->pa_obj_lock);
+-                              ext4_mb_add_n_trim(ac);
+-                      }
++              }
++      }
++      if (ac->alloc_semp)
++              up_read(ac->alloc_semp);
++      if (pa) {
++              /*
++               * We want to add the pa to the right bucket.
++               * Remove it from its current list and, while adding,
++               * make sure the list we add it to doesn't grow too
++               * big.  alloc_semp must be released before calling
++               * ext4_mb_add_n_trim()
++               */
++              if (pa->pa_linear && likely(pa->pa_free)) {
++                      spin_lock(pa->pa_obj_lock);
++                      list_del_rcu(&pa->pa_inode_list);
++                      spin_unlock(pa->pa_obj_lock);
++                      ext4_mb_add_n_trim(ac);
+               }
+               ext4_mb_put_pa(ac, ac->ac_sb, pa);
+       }
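The ext4_mb_release_context() hunk above completes a hand-off that starts in ext4_mb_use_best_found(): the group rw-semaphore taken when the buddy was loaded is moved into the allocation context and only dropped here, before ext4_mb_add_n_trim() runs. A userspace sketch of that ownership hand-off, with pthread rwlocks as stand-ins and illustrative names only:

#include <stdio.h>
#include <pthread.h>

struct buddy { pthread_rwlock_t *alloc_semp; };
struct ctx   { pthread_rwlock_t *alloc_semp; };

static void use_best_found(struct ctx *ac, struct buddy *e4b)
{
    ac->alloc_semp = e4b->alloc_semp;  /* ownership moves into the context */
    e4b->alloc_semp = NULL;            /* releasing the buddy won't drop it */
}

static void release_context(struct ctx *ac)
{
    if (ac->alloc_semp)
        pthread_rwlock_unlock(ac->alloc_semp); /* drop before further work */
    ac->alloc_semp = NULL;
}

int main(void)
{
    pthread_rwlock_t sem;
    struct buddy e4b = { &sem };
    struct ctx ac = { NULL };

    pthread_rwlock_init(&sem, NULL);
    pthread_rwlock_rdlock(e4b.alloc_semp); /* "load buddy" takes the lock */
    use_best_found(&ac, &e4b);
    release_context(&ac);
    printf("semaphore released exactly once\n");
    return 0;
}
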
+@@ -4313,7 +4544,7 @@
+       }
+       if (ar->len == 0) {
+               *errp = -EDQUOT;
+-              return 0;
++              goto out3;
+       }
+       inquota = ar->len;
+@@ -4348,10 +4579,14 @@
+                               ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
+                       ext4_mb_new_preallocation(ac);
+       }
+-
+       if (likely(ac->ac_status == AC_STATUS_FOUND)) {
+               *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks);
+               if (*errp ==  -EAGAIN) {
++                      /*
++                       * drop the reference that we took
++                       * in ext4_mb_use_best_found
++                       */
++                      ext4_mb_release_context(ac);
+                       ac->ac_b_ex.fe_group = 0;
+                       ac->ac_b_ex.fe_start = 0;
+                       ac->ac_b_ex.fe_len = 0;
+@@ -4382,6 +4617,13 @@
+ out1:
+       if (ar->len < inquota)
+               DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
++out3:
++      if (!ar->len) {
++              if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
++                      /* release all the reserved blocks if non delalloc */
++                      percpu_counter_sub(&sbi->s_dirtyblocks_counter,
++                                              reserv_blks);
++      }
+       return block;
+ }
+@@ -4403,12 +4645,13 @@
+ static noinline_for_stack int
+ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
+-                        ext4_group_t group, ext4_grpblk_t block, int count)
++                    struct ext4_free_data *new_entry)
+ {
++      ext4_grpblk_t block;
++      struct ext4_free_data *entry;
+       struct ext4_group_info *db = e4b->bd_info;
+       struct super_block *sb = e4b->bd_sb;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+-      struct ext4_free_data *entry, *new_entry;
+       struct rb_node **n = &db->bb_free_root.rb_node, *node;
+       struct rb_node *parent = NULL, *new_node;
+@@ -4416,14 +4659,9 @@
+       BUG_ON(e4b->bd_bitmap_page == NULL);
+       BUG_ON(e4b->bd_buddy_page == NULL);
+-      new_entry  = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+-      new_entry->start_blk = block;
+-      new_entry->group  = group;
+-      new_entry->count = count;
+-      new_entry->t_tid = handle->h_transaction->t_tid;
+       new_node = &new_entry->node;
++      block = new_entry->start_blk;
+-      ext4_lock_group(sb, group);
+       if (!*n) {
+               /* first free block exent. We need to
+                  protect buddy cache from being freed,
+@@ -4441,7 +4679,6 @@
+               else if (block >= (entry->start_blk + entry->count))
+                       n = &(*n)->rb_right;
+               else {
+-                      ext4_unlock_group(sb, group);
+                       ext4_error(sb, __func__,
+                           "Double free of blocks %d (%d %d)\n",
+                           block, entry->start_blk, entry->count);
+@@ -4483,7 +4720,6 @@
+       spin_lock(&sbi->s_md_lock);
+       list_add(&new_entry->list, &handle->h_transaction->t_private_list);
+       spin_unlock(&sbi->s_md_lock);
+-      ext4_unlock_group(sb, group);
+       return 0;
+ }
+@@ -4581,11 +4817,6 @@
+       err = ext4_journal_get_write_access(handle, gd_bh);
+       if (err)
+               goto error_return;
+-
+-      err = ext4_mb_load_buddy(sb, block_group, &e4b);
+-      if (err)
+-              goto error_return;
+-
+ #ifdef AGGRESSIVE_CHECK
+       {
+               int i;
+@@ -4593,13 +4824,6 @@
+                       BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
+       }
+ #endif
+-      mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
+-                      bit, count);
+-
+-      /* We dirtied the bitmap block */
+-      BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
+-      err = ext4_journal_dirty_metadata(handle, bitmap_bh);
+-
+       if (ac) {
+               ac->ac_b_ex.fe_group = block_group;
+               ac->ac_b_ex.fe_start = bit;
+@@ -4607,12 +4831,33 @@
+               ext4_mb_store_history(ac);
+       }
++      err = ext4_mb_load_buddy(sb, block_group, &e4b);
++      if (err)
++              goto error_return;
+       if (metadata) {
+-              /* blocks being freed are metadata. these blocks shouldn't
+-               * be used until this transaction is committed */
+-              ext4_mb_free_metadata(handle, &e4b, block_group, bit, count);
++              struct ext4_free_data *new_entry;
++              /*
++               * blocks being freed are metadata. these blocks shouldn't
++               * be used until this transaction is committed
++               */
++              new_entry  = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
++              new_entry->start_blk = bit;
++              new_entry->group  = block_group;
++              new_entry->count = count;
++              new_entry->t_tid = handle->h_transaction->t_tid;
++              ext4_lock_group(sb, block_group);
++              mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
++                              bit, count);
++              ext4_mb_free_metadata(handle, &e4b, new_entry);
++              ext4_unlock_group(sb, block_group);
+       } else {
+               ext4_lock_group(sb, block_group);
++              /* need to update group_info->bb_free and the bitmap
++               * with the group lock held. generate_buddy looks at
++               * them with the group lock held
++               */
++              mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
++                              bit, count);
+               mb_free_blocks(inode, &e4b, bit, count);
+               ext4_mb_return_to_preallocation(inode, &e4b, block, count);
+               ext4_unlock_group(sb, block_group);
+@@ -4635,6 +4880,10 @@
+       *freed += count;
++      /* We dirtied the bitmap block */
++      BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
++      err = ext4_journal_dirty_metadata(handle, bitmap_bh);
++
+       /* And the group descriptor block */
+       BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
+       ret = ext4_journal_dirty_metadata(handle, gd_bh);
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/mballoc.h
++++ kernel-maemo-2.6.28.test/fs/ext4/mballoc.h
+@@ -20,6 +20,7 @@
+ #include <linux/version.h>
+ #include <linux/blkdev.h>
+ #include <linux/marker.h>
++#include <linux/mutex.h>
+ #include "ext4_jbd2.h"
+ #include "ext4.h"
+ #include "group.h"
+@@ -98,9 +99,6 @@
+  */
+ #define MB_DEFAULT_GROUP_PREALLOC     512
+-static struct kmem_cache *ext4_pspace_cachep;
+-static struct kmem_cache *ext4_ac_cachep;
+-static struct kmem_cache *ext4_free_ext_cachep;
+ struct ext4_free_data {
+       /* this links the free block information from group_info */
+@@ -130,6 +128,7 @@
+ #ifdef DOUBLE_CHECK
+       void            *bb_bitmap;
+ #endif
++      struct rw_semaphore alloc_sem;
+       unsigned short  bb_counters[];
+ };
+@@ -217,6 +216,11 @@
+       __u8 ac_op;             /* operation, for history only */
+       struct page *ac_bitmap_page;
+       struct page *ac_buddy_page;
++      /*
++       * pointer to the held semaphore upon successful
++       * block allocation
++       */
++      struct rw_semaphore *alloc_semp;
+       struct ext4_prealloc_space *ac_pa;
+       struct ext4_locality_group *ac_lg;
+ };
+@@ -250,6 +254,7 @@
+       struct super_block *bd_sb;
+       __u16 bd_blkbits;
+       ext4_group_t bd_group;
++      struct rw_semaphore *alloc_semp;
+ };
+ #define EXT4_MB_BITMAP(e4b)   ((e4b)->bd_bitmap)
+ #define EXT4_MB_BUDDY(e4b)    ((e4b)->bd_buddy)
+@@ -259,25 +264,12 @@
+ {
+       return;
+ }
+-#else
+-static void ext4_mb_store_history(struct ext4_allocation_context *ac);
+ #endif
+ #define in_range(b, first, len)       ((b) >= (first) && (b) <= (first) + (len) - 1)
+ struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t);
+-static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+-                                      ext4_group_t group);
+-static void ext4_mb_return_to_preallocation(struct inode *inode,
+-                                      struct ext4_buddy *e4b, sector_t block,
+-                                      int count);
+-static void ext4_mb_put_pa(struct ext4_allocation_context *,
+-                      struct super_block *, struct ext4_prealloc_space *pa);
+-static int ext4_mb_init_per_dev_proc(struct super_block *sb);
+-static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
+-static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
+-
+ static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
+ {
+@@ -303,7 +295,7 @@
+                                               &(grinfo->bb_state));
+ }
+-static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
++static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
+                                       struct ext4_free_extent *fex)
+ {
+       ext4_fsblk_t block;
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/migrate.c
++++ kernel-maemo-2.6.28.test/fs/ext4/migrate.c
+@@ -480,7 +480,7 @@
+                                       + 1);
+       if (IS_ERR(handle)) {
+               retval = PTR_ERR(handle);
+-              goto err_out;
++              return retval;
+       }
+       tmp_inode = ext4_new_inode(handle,
+                               inode->i_sb->s_root->d_inode,
+@@ -488,8 +488,7 @@
+       if (IS_ERR(tmp_inode)) {
+               retval = -ENOMEM;
+               ext4_journal_stop(handle);
+-              tmp_inode = NULL;
+-              goto err_out;
++              return retval;
+       }
+       i_size_write(tmp_inode, i_size_read(inode));
+       /*
+@@ -617,8 +616,7 @@
+       ext4_journal_stop(handle);
+-      if (tmp_inode)
+-              iput(tmp_inode);
++      iput(tmp_inode);
+       return retval;
+ }
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/namei.c
++++ kernel-maemo-2.6.28.test/fs/ext4/namei.c
+@@ -372,6 +372,8 @@
+               goto fail;
+       }
+       hinfo->hash_version = root->info.hash_version;
++      if (hinfo->hash_version <= DX_HASH_TEA)
++              hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
+       hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+       if (d_name)
+               ext4fs_dirhash(d_name->name, d_name->len, hinfo);
+@@ -641,6 +643,9 @@
+       dir = dir_file->f_path.dentry->d_inode;
+       if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) {
+               hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
++              if (hinfo.hash_version <= DX_HASH_TEA)
++                      hinfo.hash_version +=
++                              EXT4_SB(dir->i_sb)->s_hash_unsigned;
+               hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+               count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
+                                              start_hash, start_minor_hash);
+@@ -1367,7 +1372,7 @@
+       struct fake_dirent *fde;
+       blocksize =  dir->i_sb->s_blocksize;
+-      dxtrace(printk(KERN_DEBUG "Creating index\n"));
++      dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
+       retval = ext4_journal_get_write_access(handle, bh);
+       if (retval) {
+               ext4_std_error(dir->i_sb, retval);
+@@ -1376,6 +1381,20 @@
+       }
+       root = (struct dx_root *) bh->b_data;
++      /* The 0th block becomes the root, move the dirents out */
++      fde = &root->dotdot;
++      de = (struct ext4_dir_entry_2 *)((char *)fde +
++              ext4_rec_len_from_disk(fde->rec_len));
++      if ((char *) de >= (((char *) root) + blocksize)) {
++              ext4_error(dir->i_sb, __func__,
++                         "invalid rec_len for '..' in inode %lu",
++                         dir->i_ino);
++              brelse(bh);
++              return -EIO;
++      }
++      len = ((char *) root) + blocksize - (char *) de;
++
++      /* Allocate new block for the 0th block's dirents */
+       bh2 = ext4_append(handle, dir, &block, &retval);
+       if (!(bh2)) {
+               brelse(bh);
+@@ -1384,11 +1403,6 @@
+       EXT4_I(dir)->i_flags |= EXT4_INDEX_FL;
+       data1 = bh2->b_data;
+-      /* The 0th block becomes the root, move the dirents out */
+-      fde = &root->dotdot;
+-      de = (struct ext4_dir_entry_2 *)((char *)fde +
+-              ext4_rec_len_from_disk(fde->rec_len));
+-      len = ((char *) root) + blocksize - (char *) de;
+       memcpy (data1, de, len);
+       de = (struct ext4_dir_entry_2 *) data1;
+       top = data1 + len;
+@@ -1408,6 +1422,8 @@
+       /* Initialize as for dx_probe */
+       hinfo.hash_version = root->info.hash_version;
++      if (hinfo.hash_version <= DX_HASH_TEA)
++              hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
+       hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+       ext4fs_dirhash(name, namelen, &hinfo);
+       frame = frames;
+@@ -2208,8 +2224,7 @@
+                * We have a transaction open.  All is sweetness.  It also sets
+                * i_size in generic_commit_write().
+                */
+-              err = __page_symlink(inode, symname, l,
+-                              mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
++              err = __page_symlink(inode, symname, l, 1);
+               if (err) {
+                       clear_nlink(inode);
+                       ext4_mark_inode_dirty(handle, inode);
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/resize.c
++++ kernel-maemo-2.6.28.test/fs/ext4/resize.c
+@@ -284,11 +284,9 @@
+       if ((err = extend_or_restart_transaction(handle, 2, bh)))
+               goto exit_bh;
+-      mark_bitmap_end(input->blocks_count, EXT4_BLOCKS_PER_GROUP(sb),
+-                      bh->b_data);
++      mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8, bh->b_data);
+       ext4_journal_dirty_metadata(handle, bh);
+       brelse(bh);
+-
+       /* Mark unused entries in inode bitmap used */
+       ext4_debug("clear inode bitmap %#04llx (+%llu)\n",
+                  input->inode_bitmap, input->inode_bitmap - start);
+@@ -297,7 +295,7 @@
+               goto exit_journal;
+       }
+-      mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
++      mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
+                       bh->b_data);
+       ext4_journal_dirty_metadata(handle, bh);
+ exit_bh:
+@@ -747,6 +745,7 @@
+       struct inode *inode = NULL;
+       handle_t *handle;
+       int gdb_off, gdb_num;
++      int num_grp_locked = 0;
+       int err, err2;
+       gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
+@@ -787,6 +786,7 @@
+               }
+       }
++
+       if ((err = verify_group_input(sb, input)))
+               goto exit_put;
+@@ -855,24 +855,29 @@
+          * using the new disk blocks.
+          */
++      num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, input->group);
+       /* Update group descriptor block for new group */
+       gdp = (struct ext4_group_desc *)((char *)primary->b_data +
+                                        gdb_off * EXT4_DESC_SIZE(sb));
++      memset(gdp, 0, EXT4_DESC_SIZE(sb));
+       ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
+       ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */
+       ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
+       gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
+       gdp->bg_free_inodes_count = cpu_to_le16(EXT4_INODES_PER_GROUP(sb));
++      gdp->bg_flags = cpu_to_le16(EXT4_BG_INODE_ZEROED);
+       gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp);
+       /*
+        * We can allocate memory for mb_alloc based on the new group
+        * descriptor
+        */
+-      err = ext4_mb_add_more_groupinfo(sb, input->group, gdp);
+-      if (err)
++      err = ext4_mb_add_groupinfo(sb, input->group, gdp);
++      if (err) {
++              ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
+               goto exit_journal;
++      }
+       /*
+        * Make the new blocks and inodes valid next.  We do this before
+@@ -914,6 +919,7 @@
+       /* Update the global fs size fields */
+       sbi->s_groups_count++;
++      ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
+       ext4_journal_dirty_metadata(handle, primary);
+@@ -975,9 +981,7 @@
+       struct buffer_head *bh;
+       handle_t *handle;
+       int err;
+-      unsigned long freed_blocks;
+       ext4_group_t group;
+-      struct ext4_group_info *grp;
+       /* We don't need to worry about locking wrt other resizers just
+        * yet: we're going to revalidate es->s_blocks_count after
+@@ -1076,57 +1080,13 @@
+       unlock_super(sb);
+       ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
+                  o_blocks_count + add);
+-      ext4_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
++      /* We add the blocks to the bitmap and set the group need init bit */
++      ext4_add_groupblocks(handle, sb, o_blocks_count, add);
+       ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
+                  o_blocks_count + add);
+       if ((err = ext4_journal_stop(handle)))
+               goto exit_put;
+-      /*
+-       * Mark mballoc pages as not up to date so that they will be updated
+-       * next time they are loaded by ext4_mb_load_buddy.
+-       *
+-       * XXX Bad, Bad, BAD!!!  We should not be overloading the
+-       * Uptodate flag, particularly on thte bitmap bh, as way of
+-       * hinting to ext4_mb_load_buddy() that it needs to be
+-       * overloaded.  A user could take a LVM snapshot, then do an
+-       * on-line fsck, and clear the uptodate flag, and this would
+-       * not be a bug in userspace, but a bug in the kernel.  FIXME!!!
+-       */
+-      {
+-              struct ext4_sb_info *sbi = EXT4_SB(sb);
+-              struct inode *inode = sbi->s_buddy_cache;
+-              int blocks_per_page;
+-              int block;
+-              int pnum;
+-              struct page *page;
+-
+-              /* Set buddy page as not up to date */
+-              blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+-              block = group * 2;
+-              pnum = block / blocks_per_page;
+-              page = find_get_page(inode->i_mapping, pnum);
+-              if (page != NULL) {
+-                      ClearPageUptodate(page);
+-                      page_cache_release(page);
+-              }
+-
+-              /* Set bitmap page as not up to date */
+-              block++;
+-              pnum = block / blocks_per_page;
+-              page = find_get_page(inode->i_mapping, pnum);
+-              if (page != NULL) {
+-                      ClearPageUptodate(page);
+-                      page_cache_release(page);
+-              }
+-
+-              /* Get the info on the last group */
+-              grp = ext4_get_group_info(sb, group);
+-
+-              /* Update free blocks in group info */
+-              ext4_mb_update_group_info(grp, add);
+-      }
+-
+       if (test_opt(sb, DEBUG))
+               printk(KERN_DEBUG "EXT4-fs: extended group to %llu blocks\n",
+                      ext4_blocks_count(es));
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/super.c
++++ kernel-maemo-2.6.28.test/fs/ext4/super.c
+@@ -1445,7 +1445,6 @@
+       ext4_group_t flex_group_count;
+       ext4_group_t flex_group;
+       int groups_per_flex = 0;
+-      __u64 block_bitmap = 0;
+       int i;
+       if (!sbi->s_es->s_log_groups_per_flex) {
+@@ -1468,9 +1467,6 @@
+               goto failed;
+       }
+-      gdp = ext4_get_group_desc(sb, 1, &bh);
+-      block_bitmap = ext4_block_bitmap(sb, gdp) - 1;
+-
+       for (i = 0; i < sbi->s_groups_count; i++) {
+               gdp = ext4_get_group_desc(sb, i, &bh);
+@@ -1873,8 +1869,8 @@
+       char *cp;
+       int ret = -EINVAL;
+       int blocksize;
+-      int db_count;
+-      int i;
++      unsigned int db_count;
++      unsigned int i;
+       int needs_recovery, has_huge_files;
+       __le32 features;
+       __u64 blocks_count;
+@@ -2118,6 +2114,18 @@
+       for (i = 0; i < 4; i++)
+               sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
+       sbi->s_def_hash_version = es->s_def_hash_version;
++      i = le32_to_cpu(es->s_flags);
++      if (i & EXT2_FLAGS_UNSIGNED_HASH)
++              sbi->s_hash_unsigned = 3;
++      else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
++#ifdef __CHAR_UNSIGNED__
++              es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
++              sbi->s_hash_unsigned = 3;
++#else
++              es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
++#endif
++              mark_sb_dirty(sb);
++      }
+       if (sbi->s_blocks_per_group > blocksize * 8) {
+               printk(KERN_ERR
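The super.c hunk above records whether the filesystem was created with signed- or unsigned-char directory hashing, and the namei.c hunks bump legacy hash versions (<= DX_HASH_TEA) by s_hash_unsigned so both kinds of machine compute the same htree hash. A standalone illustration of why char signedness matters and how the version offset is applied; the toy hash and the DX_HASH_* values are assumptions for the example, not the kernel's dirhash.

#include <stdio.h>

enum { DX_HASH_LEGACY = 0, DX_HASH_HALF_MD4 = 1, DX_HASH_TEA = 2 };

static unsigned toy_hash_signed(const char *s)
{
    unsigned h = 0;
    while (*s)
        h = h * 31 + (signed char)*s++;   /* sign-extends bytes >= 0x80 */
    return h;
}

static unsigned toy_hash_unsigned(const char *s)
{
    unsigned h = 0;
    while (*s)
        h = h * 31 + (unsigned char)*s++; /* never negative */
    return h;
}

int main(void)
{
    const char name[] = "caf\xc3\xa9";    /* UTF-8 bytes above 0x7f */
    int hash_version = DX_HASH_TEA;
    int s_hash_unsigned = 3;              /* set when EXT2_FLAGS_UNSIGNED_HASH */

    printf("signed-char hash   %#x\n", toy_hash_signed(name));
    printf("unsigned-char hash %#x\n", toy_hash_unsigned(name));

    if (hash_version <= DX_HASH_TEA)
        hash_version += s_hash_unsigned;  /* select the unsigned variant */
    printf("effective hash version: %d\n", hash_version);
    return 0;
}
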
+@@ -2145,20 +2153,30 @@
+       if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
+               goto cantfind_ext4;
+-      /* ensure blocks_count calculation below doesn't sign-extend */
+-      if (ext4_blocks_count(es) + EXT4_BLOCKS_PER_GROUP(sb) <
+-          le32_to_cpu(es->s_first_data_block) + 1) {
+-              printk(KERN_WARNING "EXT4-fs: bad geometry: block count %llu, "
+-                     "first data block %u, blocks per group %lu\n",
+-                      ext4_blocks_count(es),
+-                      le32_to_cpu(es->s_first_data_block),
+-                      EXT4_BLOCKS_PER_GROUP(sb));
++        /*
++         * It makes no sense for the first data block to be beyond the end
++         * of the filesystem.
++         */
++        if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
++                printk(KERN_WARNING "EXT4-fs: bad geometry: first data "
++                     "block %u is beyond end of filesystem (%llu)\n",
++                     le32_to_cpu(es->s_first_data_block),
++                     ext4_blocks_count(es));
+               goto failed_mount;
+       }
+       blocks_count = (ext4_blocks_count(es) -
+                       le32_to_cpu(es->s_first_data_block) +
+                       EXT4_BLOCKS_PER_GROUP(sb) - 1);
+       do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
++      if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
++              printk(KERN_WARNING "EXT4-fs: groups count too large: %u "
++                     "(block count %llu, first data block %u, "
++                     "blocks per group %lu)\n", sbi->s_groups_count,
++                     ext4_blocks_count(es),
++                     le32_to_cpu(es->s_first_data_block),
++                     EXT4_BLOCKS_PER_GROUP(sb));
++              goto failed_mount;
++      }
+       sbi->s_groups_count = blocks_count;
+       db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
+                  EXT4_DESC_PER_BLOCK(sb);
+@@ -2896,15 +2914,15 @@
+ static int ext4_sync_fs(struct super_block *sb, int wait)
+ {
+-      int ret = 0;
++      tid_t target;
+       trace_mark(ext4_sync_fs, "dev %s wait %d", sb->s_id, wait);
+       mark_sb_clean(sb);
+-      if (wait)
+-              ret = ext4_force_commit(sb);
+-      else
+-              jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, NULL);
+-      return ret;
++      if (jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, &target)) {
++              if (wait)
++                      jbd2_log_wait_commit(EXT4_SB(sb)->s_journal, target);
++      }
++      return 0;
+ }
+ /*
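The ext4_sync_fs() rewrite above stops forcing a commit on every sync: jbd2_journal_start_commit() reports whether a commit was actually queued and which tid to watch, and only a waiting sync blocks in jbd2_log_wait_commit(). A toy model of that "kick, then optionally wait" shape; this is not jbd2 code.

#include <stdio.h>
#include <stdbool.h>

static unsigned running_tid = 42;   /* pretend a transaction is open */
static unsigned committed_tid = 41;

static bool journal_start_commit(unsigned *target)
{
    if (running_tid == committed_tid)
        return false;                /* nothing to commit */
    *target = running_tid;           /* commit of this tid is now queued */
    return true;
}

static void log_wait_commit(unsigned target)
{
    /* in the kernel this sleeps; here the "commit" completes immediately */
    if (committed_tid < target)
        committed_tid = target;
}

static void sync_fs(bool wait)
{
    unsigned target;

    if (journal_start_commit(&target)) {
        if (wait)
            log_wait_commit(target);
    }
}

int main(void)
{
    sync_fs(false);  /* async: commit kicked off, no waiting */
    sync_fs(true);   /* sync: wait until tid 42 is on disk */
    printf("committed up to tid %u\n", committed_tid);
    return 0;
}
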
+--- kernel-maemo-2.6.28.test.orig/fs/fcntl.c
++++ kernel-maemo-2.6.28.test/fs/fcntl.c
+@@ -50,7 +50,7 @@
+       return res;
+ }
+-asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
++SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
+ {
+       int err = -EBADF;
+       struct file * file, *tofree;
+@@ -113,7 +113,7 @@
+       return err;
+ }
+-asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
++SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
+ {
+       if (unlikely(newfd == oldfd)) { /* corner case */
+               struct files_struct *files = current->files;
+@@ -126,7 +126,7 @@
+       return sys_dup3(oldfd, newfd, 0);
+ }
+-asmlinkage long sys_dup(unsigned int fildes)
++SYSCALL_DEFINE1(dup, unsigned int, fildes)
+ {
+       int ret = -EBADF;
+       struct file *file = fget(fildes);
+@@ -334,7 +334,7 @@
+       return err;
+ }
+-asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
++SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
+ {     
+       struct file *filp;
+       long err = -EBADF;
+@@ -357,7 +357,8 @@
+ }
+ #if BITS_PER_LONG == 32
+-asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
++SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
++              unsigned long, arg)
+ {     
+       struct file * filp;
+       long err;
+--- kernel-maemo-2.6.28.test.orig/fs/filesystems.c
++++ kernel-maemo-2.6.28.test/fs/filesystems.c
+@@ -179,7 +179,7 @@
+ /*
+  * Whee.. Weird sysv syscall. 
+  */
+-asmlinkage long sys_sysfs(int option, unsigned long arg1, unsigned long arg2)
++SYSCALL_DEFINE3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2)
+ {
+       int retval = -EINVAL;
+--- kernel-maemo-2.6.28.test.orig/fs/fs-writeback.c
++++ kernel-maemo-2.6.28.test/fs/fs-writeback.c
+@@ -315,6 +315,7 @@
+       int ret;
+       BUG_ON(inode->i_state & I_SYNC);
++      WARN_ON(inode->i_state & I_NEW);
+       /* Set I_SYNC, reset I_DIRTY */
+       dirty = inode->i_state & I_DIRTY;
+@@ -339,6 +340,7 @@
+       }
+       spin_lock(&inode_lock);
++      WARN_ON(inode->i_state & I_NEW);
+       inode->i_state &= ~I_SYNC;
+       if (!(inode->i_state & I_FREEING)) {
+               if (!(inode->i_state & I_DIRTY) &&
+@@ -462,9 +464,6 @@
+  * If we're a pdlfush thread, then implement pdflush collision avoidance
+  * against the entire list.
+  *
+- * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
+- * that it can be located for waiting on in __writeback_single_inode().
+- *
+  * If `bdi' is non-zero then we're being asked to writeback a specific queue.
+  * This function assumes that the blockdev superblock's inodes are backed by
+  * a variety of queues, so all inodes are searched.  For other superblocks,
+@@ -484,6 +483,7 @@
+                               struct writeback_control *wbc)
+ {
+       const unsigned long start = jiffies;    /* livelock avoidance */
++      int sync = wbc->sync_mode == WB_SYNC_ALL;
+       spin_lock(&inode_lock);
+       if (!wbc->for_kupdate || list_empty(&sb->s_io))
+@@ -513,6 +513,11 @@
+                       break;
+               }
++              if (inode->i_state & I_NEW) {
++                      requeue_io(inode);
++                      continue;
++              }
++
+               if (wbc->nonblocking && bdi_write_congested(bdi)) {
+                       wbc->encountered_congestion = 1;
+                       if (!sb_is_blkdev_sb(sb))
+@@ -540,10 +545,6 @@
+               __iget(inode);
+               pages_skipped = wbc->pages_skipped;
+               __writeback_single_inode(inode, wbc);
+-              if (wbc->sync_mode == WB_SYNC_HOLD) {
+-                      inode->dirtied_when = jiffies;
+-                      list_move(&inode->i_list, &sb->s_dirty);
+-              }
+               if (current_is_pdflush())
+                       writeback_release(bdi);
+               if (wbc->pages_skipped != pages_skipped) {
+@@ -564,7 +565,50 @@
+               if (!list_empty(&sb->s_more_io))
+                       wbc->more_io = 1;
+       }
+-      spin_unlock(&inode_lock);
++
++      if (sync) {
++              struct inode *inode, *old_inode = NULL;
++
++              /*
++               * Data integrity sync. Must wait for all pages under writeback,
++               * because there may have been pages dirtied before our sync
++               * call, but which had writeout started before we write it out.
++               * In which case, the inode may not be on the dirty list, but
++               * we still have to wait for that writeout.
++               */
++              list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
++                      struct address_space *mapping;
++
++                      if (inode->i_state &
++                                      (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
++                              continue;
++                      mapping = inode->i_mapping;
++                      if (mapping->nrpages == 0)
++                              continue;
++                      __iget(inode);
++                      spin_unlock(&inode_lock);
++                      /*
++                       * We hold a reference to 'inode' so it couldn't have
++                       * been removed from s_inodes list while we dropped the
++                       * inode_lock.  We cannot iput the inode now as we can
++                       * be holding the last reference and we cannot iput it
++                       * under inode_lock. So we keep the reference and iput
++                       * it later.
++                       */
++                      iput(old_inode);
++                      old_inode = inode;
++
++                      filemap_fdatawait(mapping);
++
++                      cond_resched();
++
++                      spin_lock(&inode_lock);
++              }
++              spin_unlock(&inode_lock);
++              iput(old_inode);
++      } else
++              spin_unlock(&inode_lock);
++
+       return;         /* Leave any unwritten inodes on s_io */
+ }
+ EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
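The WB_SYNC_ALL loop added above has to wait on every inode's mapping without sleeping under inode_lock and without dropping the final inode reference while the lock is held, hence the __iget / unlock / iput(old_inode) dance. A minimal userspace model of that reference dance; the types and names are stand-ins, not kernel structures.

#include <stdio.h>
#include <pthread.h>

struct inode { int count; int id; };

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

static void iget(struct inode *i) { i->count++; }          /* lock held */
static void iput(struct inode *i)                          /* may sleep */
{
    if (--i->count == 0)
        printf("inode %d freed outside inode_lock\n", i->id);
}

static void wait_on_mapping(struct inode *i)
{
    printf("waiting for writeback of inode %d\n", i->id);  /* may sleep */
}

int main(void)
{
    struct inode inodes[3] = { {1, 0}, {1, 1}, {1, 2} };
    struct inode *old_inode = NULL;

    pthread_mutex_lock(&inode_lock);
    for (int n = 0; n < 3; n++) {
        struct inode *inode = &inodes[n];

        iget(inode);                       /* pin it while the lock is held */
        pthread_mutex_unlock(&inode_lock);

        if (old_inode)
            iput(old_inode);               /* safe: lock not held any more */
        old_inode = inode;

        wait_on_mapping(inode);

        pthread_mutex_lock(&inode_lock);
    }
    pthread_mutex_unlock(&inode_lock);
    if (old_inode)
        iput(old_inode);
    return 0;
}
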
+@@ -629,8 +673,7 @@
+ /*
+  * writeback and wait upon the filesystem's dirty inodes.  The caller will
+- * do this in two passes - one to write, and one to wait.  WB_SYNC_HOLD is
+- * used to park the written inodes on sb->s_dirty for the wait pass.
++ * do this in two passes - one to write, and one to wait.
+  *
+  * A finite limit is set on the number of pages which will be written.
+  * To prevent infinite livelock of sys_sync().
+@@ -641,30 +684,21 @@
+ void sync_inodes_sb(struct super_block *sb, int wait)
+ {
+       struct writeback_control wbc = {
+-              .sync_mode      = wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
++              .sync_mode      = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
+               .range_start    = 0,
+               .range_end      = LLONG_MAX,
+       };
+-      unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
+-      unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
+-      wbc.nr_to_write = nr_dirty + nr_unstable +
+-                      (inodes_stat.nr_inodes - inodes_stat.nr_unused) +
+-                      nr_dirty + nr_unstable;
+-      wbc.nr_to_write += wbc.nr_to_write / 2;         /* Bit more for luck */
+-      sync_sb_inodes(sb, &wbc);
+-}
++      if (!wait) {
++              unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
++              unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
++
++              wbc.nr_to_write = nr_dirty + nr_unstable +
++                      (inodes_stat.nr_inodes - inodes_stat.nr_unused);
++      } else
++              wbc.nr_to_write = LONG_MAX; /* doesn't actually matter */
+-/*
+- * Rather lame livelock avoidance.
+- */
+-static void set_sb_syncing(int val)
+-{
+-      struct super_block *sb;
+-      spin_lock(&sb_lock);
+-      list_for_each_entry_reverse(sb, &super_blocks, s_list)
+-              sb->s_syncing = val;
+-      spin_unlock(&sb_lock);
++      sync_sb_inodes(sb, &wbc);
+ }
+ /**
+@@ -693,9 +727,6 @@
+       spin_lock(&sb_lock);
+ restart:
+       list_for_each_entry(sb, &super_blocks, s_list) {
+-              if (sb->s_syncing)
+-                      continue;
+-              sb->s_syncing = 1;
+               sb->s_count++;
+               spin_unlock(&sb_lock);
+               down_read(&sb->s_umount);
+@@ -713,13 +744,10 @@
+ void sync_inodes(int wait)
+ {
+-      set_sb_syncing(0);
+       __sync_inodes(0);
+-      if (wait) {
+-              set_sb_syncing(0);
++      if (wait)
+               __sync_inodes(1);
+-      }
+ }
+ /**
+--- kernel-maemo-2.6.28.test.orig/fs/fuse/dev.c
++++ kernel-maemo-2.6.28.test/fs/fuse/dev.c
+@@ -281,7 +281,8 @@
+                       fc->blocked = 0;
+                       wake_up_all(&fc->blocked_waitq);
+               }
+-              if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
++              if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
++                  fc->connected) {
+                       clear_bdi_congested(&fc->bdi, READ);
+                       clear_bdi_congested(&fc->bdi, WRITE);
+               }
+--- kernel-maemo-2.6.28.test.orig/fs/fuse/file.c
++++ kernel-maemo-2.6.28.test/fs/fuse/file.c
+@@ -54,7 +54,7 @@
+               ff->reserved_req = fuse_request_alloc();
+               if (!ff->reserved_req) {
+                       kfree(ff);
+-                      ff = NULL;
++                      return NULL;
+               } else {
+                       INIT_LIST_HEAD(&ff->write_entry);
+                       atomic_set(&ff->count, 0);
+@@ -646,7 +646,7 @@
+ {
+       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+-      *pagep = __grab_cache_page(mapping, index);
++      *pagep = grab_cache_page_write_begin(mapping, index, flags);
+       if (!*pagep)
+               return -ENOMEM;
+       return 0;
+@@ -779,7 +779,7 @@
+                       break;
+               err = -ENOMEM;
+-              page = __grab_cache_page(mapping, index);
++              page = grab_cache_page_write_begin(mapping, index, 0);
+               if (!page)
+                       break;
+--- kernel-maemo-2.6.28.test.orig/fs/fuse/inode.c
++++ kernel-maemo-2.6.28.test/fs/fuse/inode.c
+@@ -292,6 +292,7 @@
+       list_del(&fc->entry);
+       fuse_ctl_remove_conn(fc);
+       mutex_unlock(&fuse_mutex);
++      bdi_destroy(&fc->bdi);
+       fuse_conn_put(fc);
+ }
+@@ -531,7 +532,6 @@
+               if (fc->destroy_req)
+                       fuse_request_free(fc->destroy_req);
+               mutex_destroy(&fc->inst_mutex);
+-              bdi_destroy(&fc->bdi);
+               kfree(fc);
+       }
+ }
+@@ -825,12 +825,16 @@
+       if (!file)
+               return -EINVAL;
+-      if (file->f_op != &fuse_dev_operations)
++      if (file->f_op != &fuse_dev_operations) {
++              fput(file);
+               return -EINVAL;
++      }
+       fc = new_conn(sb);
+-      if (!fc)
++      if (!fc) {
++              fput(file);
+               return -ENOMEM;
++      }
+       fc->flags = d.flags;
+       fc->user_id = d.user_id;
+--- kernel-maemo-2.6.28.test.orig/fs/gfs2/ops_address.c
++++ kernel-maemo-2.6.28.test/fs/gfs2/ops_address.c
+@@ -675,7 +675,7 @@
+               goto out_trans_fail;
+       error = -ENOMEM;
+-      page = __grab_cache_page(mapping, index);
++      page = grab_cache_page_write_begin(mapping, index, flags);
+       *pagep = page;
+       if (unlikely(!page))
+               goto out_endtrans;
+--- kernel-maemo-2.6.28.test.orig/fs/hostfs/hostfs_kern.c
++++ kernel-maemo-2.6.28.test/fs/hostfs/hostfs_kern.c
+@@ -501,7 +501,7 @@
+ {
+       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+-      *pagep = __grab_cache_page(mapping, index);
++      *pagep = grab_cache_page_write_begin(mapping, index, flags);
+       if (!*pagep)
+               return -ENOMEM;
+       return 0;
+--- kernel-maemo-2.6.28.test.orig/fs/hugetlbfs/inode.c
++++ kernel-maemo-2.6.28.test/fs/hugetlbfs/inode.c
+@@ -26,7 +26,6 @@
+ #include <linux/pagevec.h>
+ #include <linux/parser.h>
+ #include <linux/mman.h>
+-#include <linux/quotaops.h>
+ #include <linux/slab.h>
+ #include <linux/dnotify.h>
+ #include <linux/statfs.h>
+@@ -838,7 +837,7 @@
+ bad_val:
+       printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
+              args[0].from, p);
+-      return 1;
++      return -EINVAL;
+ }
+ static int
+--- kernel-maemo-2.6.28.test.orig/fs/inode.c
++++ kernel-maemo-2.6.28.test/fs/inode.c
+@@ -339,6 +339,7 @@
+               invalidate_inode_buffers(inode);
+               if (!atomic_read(&inode->i_count)) {
+                       list_move(&inode->i_list, dispose);
++                      WARN_ON(inode->i_state & I_NEW);
+                       inode->i_state |= I_FREEING;
+                       count++;
+                       continue;
+@@ -440,6 +441,7 @@
+                               continue;
+               }
+               list_move(&inode->i_list, &freeable);
++              WARN_ON(inode->i_state & I_NEW);
+               inode->i_state |= I_FREEING;
+               nr_pruned++;
+       }
+@@ -595,6 +597,7 @@
+        * just created it (so there can be no old holders
+        * that haven't tested I_LOCK).
+        */
++      WARN_ON((inode->i_state & (I_LOCK|I_NEW)) != (I_LOCK|I_NEW));
+       inode->i_state &= ~(I_LOCK|I_NEW);
+       wake_up_inode(inode);
+ }
+@@ -1041,6 +1044,7 @@
+       list_del_init(&inode->i_list);
+       list_del_init(&inode->i_sb_list);
++      WARN_ON(inode->i_state & I_NEW);
+       inode->i_state |= I_FREEING;
+       inodes_stat.nr_inodes--;
+       spin_unlock(&inode_lock);
+@@ -1082,16 +1086,19 @@
+                       spin_unlock(&inode_lock);
+                       return;
+               }
++              WARN_ON(inode->i_state & I_NEW);
+               inode->i_state |= I_WILL_FREE;
+               spin_unlock(&inode_lock);
+               write_inode_now(inode, 1);
+               spin_lock(&inode_lock);
++              WARN_ON(inode->i_state & I_NEW);
+               inode->i_state &= ~I_WILL_FREE;
+               inodes_stat.nr_unused--;
+               hlist_del_init(&inode->i_hash);
+       }
+       list_del_init(&inode->i_list);
+       list_del_init(&inode->i_sb_list);
++      WARN_ON(inode->i_state & I_NEW);
+       inode->i_state |= I_FREEING;
+       inodes_stat.nr_inodes--;
+       spin_unlock(&inode_lock);
+--- kernel-maemo-2.6.28.test.orig/fs/inotify.c
++++ kernel-maemo-2.6.28.test/fs/inotify.c
+@@ -156,7 +156,7 @@
+       int ret;
+       do {
+-              if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL)))
++              if (unlikely(!idr_pre_get(&ih->idr, GFP_NOFS)))
+                       return -ENOSPC;
+               ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd);
+       } while (ret == -EAGAIN);
+--- kernel-maemo-2.6.28.test.orig/fs/inotify_user.c
++++ kernel-maemo-2.6.28.test/fs/inotify_user.c
+@@ -427,10 +427,61 @@
+       return ret;
+ }
++/*
++ * Get an inotify_kernel_event if one exists and is small
++ * enough to fit in "count". Return an error pointer if
++ * the buffer is not large enough to hold it.
++ *
++ * Called with the device ev_mutex held.
++ */
++static struct inotify_kernel_event *get_one_event(struct inotify_device *dev,
++                                                size_t count)
++{
++      size_t event_size = sizeof(struct inotify_event);
++      struct inotify_kernel_event *kevent;
++
++      if (list_empty(&dev->events))
++              return NULL;
++
++      kevent = inotify_dev_get_event(dev);
++      if (kevent->name)
++              event_size += kevent->event.len;
++
++      if (event_size > count)
++              return ERR_PTR(-EINVAL);
++
++      remove_kevent(dev, kevent);
++      return kevent;
++}
++
++/*
++ * Copy an event to user space, returning how much we copied.
++ *
++ * We already checked that the event size is smaller than the
++ * buffer we had in "get_one_event()" above.
++ */
++static ssize_t copy_event_to_user(struct inotify_kernel_event *kevent,
++                                char __user *buf)
++{
++      size_t event_size = sizeof(struct inotify_event);
++
++      if (copy_to_user(buf, &kevent->event, event_size))
++              return -EFAULT;
++
++      if (kevent->name) {
++              buf += event_size;
++
++              if (copy_to_user(buf, kevent->name, kevent->event.len))
++                      return -EFAULT;
++
++              event_size += kevent->event.len;
++      }
++      return event_size;
++}
++
+ static ssize_t inotify_read(struct file *file, char __user *buf,
+                           size_t count, loff_t *pos)
+ {
+-      size_t event_size = sizeof (struct inotify_event);
+       struct inotify_device *dev;
+       char __user *start;
+       int ret;
+@@ -440,81 +491,43 @@
+       dev = file->private_data;
+       while (1) {
++              struct inotify_kernel_event *kevent;
+               prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
+               mutex_lock(&dev->ev_mutex);
+-              if (!list_empty(&dev->events)) {
+-                      ret = 0;
+-                      break;
+-              }
++              kevent = get_one_event(dev, count);
+               mutex_unlock(&dev->ev_mutex);
+-              if (file->f_flags & O_NONBLOCK) {
+-                      ret = -EAGAIN;
+-                      break;
+-              }
+-
+-              if (signal_pending(current)) {
+-                      ret = -EINTR;
+-                      break;
++              if (kevent) {
++                      ret = PTR_ERR(kevent);
++                      if (IS_ERR(kevent))
++                              break;
++                      ret = copy_event_to_user(kevent, buf);
++                      free_kevent(kevent);
++                      if (ret < 0)
++                              break;
++                      buf += ret;
++                      count -= ret;
++                      continue;
+               }
+-              schedule();
+-      }
+-
+-      finish_wait(&dev->wq, &wait);
+-      if (ret)
+-              return ret;
+-
+-      while (1) {
+-              struct inotify_kernel_event *kevent;
+-
+-              ret = buf - start;
+-              if (list_empty(&dev->events))
++              ret = -EAGAIN;
++              if (file->f_flags & O_NONBLOCK)
+                       break;
+-
+-              kevent = inotify_dev_get_event(dev);
+-              if (event_size + kevent->event.len > count) {
+-                      if (ret == 0 && count > 0) {
+-                              /*
+-                               * could not get a single event because we
+-                               * didn't have enough buffer space.
+-                               */
+-                              ret = -EINVAL;
+-                      }
++              ret = -EINTR;
++              if (signal_pending(current))
+                       break;
+-              }
+-              remove_kevent(dev, kevent);
+-              /*
+-               * Must perform the copy_to_user outside the mutex in order
+-               * to avoid a lock order reversal with mmap_sem.
+-               */
+-              mutex_unlock(&dev->ev_mutex);
+-
+-              if (copy_to_user(buf, &kevent->event, event_size)) {
+-                      ret = -EFAULT;
++              if (start != buf)
+                       break;
+-              }
+-              buf += event_size;
+-              count -= event_size;
+-
+-              if (kevent->name) {
+-                      if (copy_to_user(buf, kevent->name, kevent->event.len)){
+-                              ret = -EFAULT;
+-                              break;
+-                      }
+-                      buf += kevent->event.len;
+-                      count -= kevent->event.len;
+-              }
+-              free_kevent(kevent);
+-
+-              mutex_lock(&dev->ev_mutex);
++              schedule();
+       }
+-      mutex_unlock(&dev->ev_mutex);
++      finish_wait(&dev->wq, &wait);
++      if (start != buf && ret != -EFAULT)
++              ret = buf - start;
+       return ret;
+ }
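The inotify_read() rewrite above splits the work into get_one_event(), called under ev_mutex, and copy_event_to_user(), called with the mutex dropped, which removes the lock-order problem with mmap_sem noted in the deleted code. A standalone model of that loop structure; there is no locking or sleeping here and every name is illustrative.

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct event { int wd; unsigned len; const char *name; };

static struct event queue[] = { { 1, 4, "a.c" }, { 2, 6, "b.txt" } };
static unsigned qhead;

static struct event *get_one_event(size_t count, int *err)
{
    *err = 0;
    if (qhead >= sizeof(queue) / sizeof(queue[0]))
        return NULL;                          /* queue empty */
    if (sizeof(struct event) + queue[qhead].len > count) {
        *err = -EINVAL;                       /* buffer too small for next event */
        return NULL;
    }
    return &queue[qhead++];                   /* dequeue; real code holds ev_mutex */
}

static size_t copy_event_to_user(const struct event *ev, char *buf)
{
    memcpy(buf, ev, sizeof(*ev));                 /* fixed-size header first */
    memcpy(buf + sizeof(*ev), ev->name, ev->len); /* then the name */
    return sizeof(*ev) + ev->len;
}

int main(void)
{
    char buf[256];
    char *p = buf;
    size_t count = sizeof(buf);
    int err;
    struct event *ev;

    while ((ev = get_one_event(count, &err)) != NULL) {
        size_t n = copy_event_to_user(ev, p);     /* done with the "lock" dropped */
        p += n;
        count -= n;
    }
    printf("read %ld bytes of events (err=%d)\n", (long)(p - buf), err);
    return 0;
}
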
+@@ -576,7 +589,7 @@
+       .destroy_watch  = free_inotify_user_watch,
+ };
+-asmlinkage long sys_inotify_init1(int flags)
++SYSCALL_DEFINE1(inotify_init1, int, flags)
+ {
+       struct inotify_device *dev;
+       struct inotify_handle *ih;
+@@ -655,12 +668,13 @@
+       return ret;
+ }
+-asmlinkage long sys_inotify_init(void)
++SYSCALL_DEFINE0(inotify_init)
+ {
+       return sys_inotify_init1(0);
+ }
+-asmlinkage long sys_inotify_add_watch(int fd, const char __user *pathname, u32 mask)
++SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
++              u32, mask)
+ {
+       struct inode *inode;
+       struct inotify_device *dev;
+@@ -704,7 +718,7 @@
+       return ret;
+ }
+-asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
++SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
+ {
+       struct file *filp;
+       struct inotify_device *dev;
+--- kernel-maemo-2.6.28.test.orig/fs/ioctl.c
++++ kernel-maemo-2.6.28.test/fs/ioctl.c
+@@ -472,7 +472,7 @@
+       return error;
+ }
+-asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
++SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
+ {
+       struct file *filp;
+       int error = -EBADF;
+--- kernel-maemo-2.6.28.test.orig/fs/ioprio.c
++++ kernel-maemo-2.6.28.test/fs/ioprio.c
+@@ -65,7 +65,7 @@
+       return err;
+ }
+-asmlinkage long sys_ioprio_set(int which, int who, int ioprio)
++SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
+ {
+       int class = IOPRIO_PRIO_CLASS(ioprio);
+       int data = IOPRIO_PRIO_DATA(ioprio);
+@@ -181,7 +181,7 @@
+               return aprio;
+ }
+-asmlinkage long sys_ioprio_get(int which, int who)
++SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
+ {
+       struct task_struct *g, *p;
+       struct user_struct *user;
+@@ -245,4 +245,3 @@
+       read_unlock(&tasklist_lock);
+       return ret;
+ }
+-
+--- kernel-maemo-2.6.28.test.orig/fs/jbd2/commit.c
++++ kernel-maemo-2.6.28.test/fs/jbd2/commit.c
+@@ -25,6 +25,7 @@
+ #include <linux/crc32.h>
+ #include <linux/writeback.h>
+ #include <linux/backing-dev.h>
++#include <linux/bio.h>
+ /*
+  * Default IO end handler for temporary BJ_IO buffer_heads.
+@@ -168,12 +169,34 @@
+  * This function along with journal_submit_commit_record
+  * allows to write the commit record asynchronously.
+  */
+-static int journal_wait_on_commit_record(struct buffer_head *bh)
++static int journal_wait_on_commit_record(journal_t *journal,
++                                       struct buffer_head *bh)
+ {
+       int ret = 0;
++retry:
+       clear_buffer_dirty(bh);
+       wait_on_buffer(bh);
++      if (buffer_eopnotsupp(bh) && (journal->j_flags & JBD2_BARRIER)) {
++              printk(KERN_WARNING
++                     "JBD2: wait_on_commit_record: sync failed on %s - "
++                     "disabling barriers\n", journal->j_devname);
++              spin_lock(&journal->j_state_lock);
++              journal->j_flags &= ~JBD2_BARRIER;
++              spin_unlock(&journal->j_state_lock);
++
++              lock_buffer(bh);
++              clear_buffer_dirty(bh);
++              set_buffer_uptodate(bh);
++              bh->b_end_io = journal_end_buffer_io_sync;
++
++              ret = submit_bh(WRITE_SYNC, bh);
++              if (ret) {
++                      unlock_buffer(bh);
++                      return ret;
++              }
++              goto retry;
++      }
+       if (unlikely(!buffer_uptodate(bh)))
+               ret = -EIO;
+@@ -799,7 +822,7 @@
+                       __jbd2_journal_abort_hard(journal);
+       }
+       if (!err && !is_journal_aborted(journal))
+-              err = journal_wait_on_commit_record(cbh);
++              err = journal_wait_on_commit_record(journal, cbh);
+       if (err)
+               jbd2_journal_abort(journal, err);
+--- kernel-maemo-2.6.28.test.orig/fs/jbd2/journal.c
++++ kernel-maemo-2.6.28.test/fs/jbd2/journal.c
+@@ -430,7 +430,7 @@
+ }
+ /*
+- * Called under j_state_lock.  Returns true if a transaction was started.
++ * Called under j_state_lock.  Returns true if a transaction commit was started.
+  */
+ int __jbd2_log_start_commit(journal_t *journal, tid_t target)
+ {
+@@ -498,7 +498,8 @@
+ /*
+  * Start a commit of the current running transaction (if any).  Returns true
+- * if a transaction was started, and fills its tid in at *ptid
++ * if a transaction is going to be committed (or is currently already
++ * committing), and fills its tid in at *ptid
+  */
+ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
+ {
+@@ -508,15 +509,19 @@
+       if (journal->j_running_transaction) {
+               tid_t tid = journal->j_running_transaction->t_tid;
+-              ret = __jbd2_log_start_commit(journal, tid);
+-              if (ret && ptid)
++              __jbd2_log_start_commit(journal, tid);
++              /* There's a running transaction and we've just made sure
++               * its commit has been scheduled. */
++              if (ptid)
+                       *ptid = tid;
+-      } else if (journal->j_committing_transaction && ptid) {
++              ret = 1;
++      } else if (journal->j_committing_transaction) {
+               /*
+                * If ext3_write_super() recently started a commit, then we
+                * have to wait for completion of that transaction
+                */
+-              *ptid = journal->j_committing_transaction->t_tid;
++              if (ptid)
++                      *ptid = journal->j_committing_transaction->t_tid;
+               ret = 1;
+       }
+       spin_unlock(&journal->j_state_lock);
+--- kernel-maemo-2.6.28.test.orig/fs/jbd2/transaction.c
++++ kernel-maemo-2.6.28.test/fs/jbd2/transaction.c
+@@ -2050,26 +2050,46 @@
+ }
+ /*
+- * This function must be called when inode is journaled in ordered mode
+- * before truncation happens. It starts writeout of truncated part in
+- * case it is in the committing transaction so that we stand to ordered
+- * mode consistency guarantees.
++ * File truncate and transaction commit interact with each other in a
++ * non-trivial way.  If a transaction writing data block A is
++ * committing, we cannot discard the data by truncate until we have
++ * written them.  Otherwise if we crashed after the transaction with
++ * write has committed but before the transaction with truncate has
++ * committed, we could see stale data in block A.  This function is a
++ * helper to solve this problem.  It starts writeout of the truncated
++ * part in case it is in the committing transaction.
++ *
++ * Filesystem code must call this function when inode is journaled in
++ * ordered mode before truncation happens and after the inode has been
++ * placed on orphan list with the new inode size. The second condition
++ * avoids the race that someone writes new data and we start
++ * committing the transaction after this function has been called but
++ * before a transaction for truncate is started (and furthermore it
++ * allows us to optimize the case where the addition to orphan list
++ * happens in the same transaction as write --- we don't have to write
++ * any data in such case).
+  */
+-int jbd2_journal_begin_ordered_truncate(struct jbd2_inode *inode,
++int jbd2_journal_begin_ordered_truncate(journal_t *journal,
++                                      struct jbd2_inode *jinode,
+                                       loff_t new_size)
+ {
+-      journal_t *journal;
+-      transaction_t *commit_trans;
++      transaction_t *inode_trans, *commit_trans;
+       int ret = 0;
+-      if (!inode->i_transaction && !inode->i_next_transaction)
++      /* This is a quick check to avoid locking if not necessary */
++      if (!jinode->i_transaction)
+               goto out;
+-      journal = inode->i_transaction->t_journal;
++      /* Locks are here just to force reading of recent values, it is
++       * enough that the transaction was not committing before we started
++       * a transaction adding the inode to orphan list */
+       spin_lock(&journal->j_state_lock);
+       commit_trans = journal->j_committing_transaction;
+       spin_unlock(&journal->j_state_lock);
+-      if (inode->i_transaction == commit_trans) {
+-              ret = filemap_fdatawrite_range(inode->i_vfs_inode->i_mapping,
++      spin_lock(&journal->j_list_lock);
++      inode_trans = jinode->i_transaction;
++      spin_unlock(&journal->j_list_lock);
++      if (inode_trans == commit_trans) {
++              ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
+                       new_size, LLONG_MAX);
+               if (ret)
+                       jbd2_journal_abort(journal, ret);
+--- kernel-maemo-2.6.28.test.orig/fs/jffs2/file.c
++++ kernel-maemo-2.6.28.test/fs/jffs2/file.c
+@@ -132,7 +132,7 @@
+       uint32_t pageofs = index << PAGE_CACHE_SHIFT;
+       int ret = 0;
+-      pg = __grab_cache_page(mapping, index);
++      pg = grab_cache_page_write_begin(mapping, index, flags);
+       if (!pg)
+               return -ENOMEM;
+       *pagep = pg;
+--- kernel-maemo-2.6.28.test.orig/fs/jffs2/readinode.c
++++ kernel-maemo-2.6.28.test/fs/jffs2/readinode.c
+@@ -220,7 +220,7 @@
+                               struct jffs2_tmp_dnode_info *tn)
+ {
+       uint32_t fn_end = tn->fn->ofs + tn->fn->size;
+-      struct jffs2_tmp_dnode_info *this;
++      struct jffs2_tmp_dnode_info *this, *ptn;
+       dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw));
+@@ -251,11 +251,18 @@
+       if (this) {
+               /* If the node is coincident with another at a lower address,
+                  back up until the other node is found. It may be relevant */
+-              while (this->overlapped)
+-                      this = tn_prev(this);
+-
+-              /* First node should never be marked overlapped */
+-              BUG_ON(!this);
++              while (this->overlapped) {
++                      ptn = tn_prev(this);
++                      if (!ptn) {
++                              /*
++                               * We killed a node which set the overlapped
++                               * flags during the scan. Fix it up.
++                               */
++                              this->overlapped = 0;
++                              break;
++                      }
++                      this = ptn;
++              }
+               dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole");
+       }
+@@ -360,7 +367,17 @@
+                       }
+                       if (!this->overlapped)
+                               break;
+-                      this = tn_prev(this);
++
++                      ptn = tn_prev(this);
++                      if (!ptn) {
++                              /*
++                               * We killed a node which set the overlapped
++                               * flags during the scan. Fix it up.
++                               */
++                              this->overlapped = 0;
++                              break;
++                      }
++                      this = ptn;
+               }
+       }
+@@ -456,8 +473,15 @@
+               eat_last(&rii->tn_root, &last->rb);
+               ver_insert(&ver_root, last);
+-              if (unlikely(last->overlapped))
+-                      continue;
++              if (unlikely(last->overlapped)) {
++                      if (pen)
++                              continue;
++                      /*
++                       * We killed a node which set the overlapped
++                       * flags during the scan. Fix it up.
++                       */
++                      last->overlapped = 0;
++              }
+               /* Now we have a bunch of nodes in reverse version
+                  order, in the tree at ver_root. Most of the time,
+--- kernel-maemo-2.6.28.test.orig/fs/libfs.c
++++ kernel-maemo-2.6.28.test/fs/libfs.c
+@@ -360,7 +360,7 @@
+       index = pos >> PAGE_CACHE_SHIFT;
+       from = pos & (PAGE_CACHE_SIZE - 1);
+-      page = __grab_cache_page(mapping, index);
++      page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page)
+               return -ENOMEM;
+--- kernel-maemo-2.6.28.test.orig/fs/lockd/svclock.c
++++ kernel-maemo-2.6.28.test/fs/lockd/svclock.c
+@@ -427,7 +427,7 @@
+                       goto out;
+               case -EAGAIN:
+                       ret = nlm_lck_denied;
+-                      goto out;
++                      break;
+               case FILE_LOCK_DEFERRED:
+                       if (wait)
+                               break;
+@@ -443,6 +443,10 @@
+                       goto out;
+       }
++      ret = nlm_lck_denied;
++      if (!wait)
++              goto out;
++
+       ret = nlm_lck_blocked;
+       /* Append to list of blocked */
+--- kernel-maemo-2.6.28.test.orig/fs/locks.c
++++ kernel-maemo-2.6.28.test/fs/locks.c
+@@ -1564,7 +1564,7 @@
+  *    %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
+  *    processes read and write access respectively.
+  */
+-asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
++SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
+ {
+       struct file *filp;
+       struct file_lock *lock;
+--- kernel-maemo-2.6.28.test.orig/fs/minix/dir.c
++++ kernel-maemo-2.6.28.test/fs/minix/dir.c
+@@ -280,7 +280,7 @@
+       return -EINVAL;
+ got_it:
+-      pos = (page->index >> PAGE_CACHE_SHIFT) + p - (char*)page_address(page);
++      pos = page_offset(page) + p - (char *)page_address(page);
+       err = __minix_write_begin(NULL, page->mapping, pos, sbi->s_dirsize,
+                                       AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+       if (err)
+--- kernel-maemo-2.6.28.test.orig/fs/namei.c
++++ kernel-maemo-2.6.28.test/fs/namei.c
+@@ -1970,8 +1970,8 @@
+       }
+ }
+-asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode,
+-                              unsigned dev)
++SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
++              unsigned, dev)
+ {
+       int error;
+       char *tmp;
+@@ -2021,7 +2021,7 @@
+       return error;
+ }
+-asmlinkage long sys_mknod(const char __user *filename, int mode, unsigned dev)
++SYSCALL_DEFINE3(mknod, const char __user *, filename, int, mode, unsigned, dev)
+ {
+       return sys_mknodat(AT_FDCWD, filename, mode, dev);
+ }
+@@ -2048,7 +2048,7 @@
+       return error;
+ }
+-asmlinkage long sys_mkdirat(int dfd, const char __user *pathname, int mode)
++SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
+ {
+       int error = 0;
+       char * tmp;
+@@ -2081,7 +2081,7 @@
+       return error;
+ }
+-asmlinkage long sys_mkdir(const char __user *pathname, int mode)
++SYSCALL_DEFINE2(mkdir, const char __user *, pathname, int, mode)
+ {
+       return sys_mkdirat(AT_FDCWD, pathname, mode);
+ }
+@@ -2191,7 +2191,7 @@
+       return error;
+ }
+-asmlinkage long sys_rmdir(const char __user *pathname)
++SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
+ {
+       return do_rmdir(AT_FDCWD, pathname);
+ }
+@@ -2283,7 +2283,7 @@
+       goto exit2;
+ }
+-asmlinkage long sys_unlinkat(int dfd, const char __user *pathname, int flag)
++SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
+ {
+       if ((flag & ~AT_REMOVEDIR) != 0)
+               return -EINVAL;
+@@ -2294,7 +2294,7 @@
+       return do_unlinkat(dfd, pathname);
+ }
+-asmlinkage long sys_unlink(const char __user *pathname)
++SYSCALL_DEFINE1(unlink, const char __user *, pathname)
+ {
+       return do_unlinkat(AT_FDCWD, pathname);
+ }
+@@ -2320,8 +2320,8 @@
+       return error;
+ }
+-asmlinkage long sys_symlinkat(const char __user *oldname,
+-                            int newdfd, const char __user *newname)
++SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
++              int, newdfd, const char __user *, newname)
+ {
+       int error;
+       char *from;
+@@ -2358,7 +2358,7 @@
+       return error;
+ }
+-asmlinkage long sys_symlink(const char __user *oldname, const char __user *newname)
++SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
+ {
+       return sys_symlinkat(oldname, AT_FDCWD, newname);
+ }
+@@ -2410,9 +2410,8 @@
+  * with linux 2.0, and to avoid hard-linking to directories
+  * and other special files.  --ADM
+  */
+-asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
+-                         int newdfd, const char __user *newname,
+-                         int flags)
++SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
++              int, newdfd, const char __user *, newname, int, flags)
+ {
+       struct dentry *new_dentry;
+       struct nameidata nd;
+@@ -2457,7 +2456,7 @@
+       return error;
+ }
+-asmlinkage long sys_link(const char __user *oldname, const char __user *newname)
++SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
+ {
+       return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
+ }
+@@ -2608,8 +2607,8 @@
+       return error;
+ }
+-asmlinkage long sys_renameat(int olddfd, const char __user *oldname,
+-                           int newdfd, const char __user *newname)
++SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
++              int, newdfd, const char __user *, newname)
+ {
+       struct dentry *old_dir, *new_dir;
+       struct dentry *old_dentry, *new_dentry;
+@@ -2697,7 +2696,7 @@
+       return error;
+ }
+-asmlinkage long sys_rename(const char __user *oldname, const char __user *newname)
++SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
+ {
+       return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname);
+ }
+@@ -2787,18 +2786,23 @@
+       }
+ }
+-int __page_symlink(struct inode *inode, const char *symname, int len,
+-              gfp_t gfp_mask)
++/*
++ * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
++ */
++int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
+ {
+       struct address_space *mapping = inode->i_mapping;
+       struct page *page;
+       void *fsdata;
+       int err;
+       char *kaddr;
++      unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
++      if (nofs)
++              flags |= AOP_FLAG_NOFS;
+ retry:
+       err = pagecache_write_begin(NULL, mapping, 0, len-1,
+-                              AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
++                              flags, &page, &fsdata);
+       if (err)
+               goto fail;
+@@ -2822,7 +2826,7 @@
+ int page_symlink(struct inode *inode, const char *symname, int len)
+ {
+       return __page_symlink(inode, symname, len,
+-                      mapping_gfp_mask(inode->i_mapping));
++                      !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
+ }
+ const struct inode_operations page_symlink_inode_operations = {
+--- kernel-maemo-2.6.28.test.orig/fs/namespace.c
++++ kernel-maemo-2.6.28.test/fs/namespace.c
+@@ -1128,7 +1128,7 @@
+  * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
+  */
+-asmlinkage long sys_umount(char __user * name, int flags)
++SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
+ {
+       struct path path;
+       int retval;
+@@ -1160,7 +1160,7 @@
+ /*
+  *    The 2.0 compatible umount. No flags.
+  */
+-asmlinkage long sys_oldumount(char __user * name)
++SYSCALL_DEFINE1(oldumount, char __user *, name)
+ {
+       return sys_umount(name, 0);
+ }
+@@ -2045,9 +2045,8 @@
+       return new_ns;
+ }
+-asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
+-                        char __user * type, unsigned long flags,
+-                        void __user * data)
++SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
++              char __user *, type, unsigned long, flags, void __user *, data)
+ {
+       int retval;
+       unsigned long data_page;
+@@ -2172,8 +2171,8 @@
+  *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
+  *    first.
+  */
+-asmlinkage long sys_pivot_root(const char __user * new_root,
+-                             const char __user * put_old)
++SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
++              const char __user *, put_old)
+ {
+       struct vfsmount *tmp;
+       struct path new, old, parent_path, root_parent, root;
+--- kernel-maemo-2.6.28.test.orig/fs/nfs/file.c
++++ kernel-maemo-2.6.28.test/fs/nfs/file.c
+@@ -354,7 +354,7 @@
+               file->f_path.dentry->d_name.name,
+               mapping->host->i_ino, len, (long long) pos);
+-      page = __grab_cache_page(mapping, index);
++      page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page)
+               return -ENOMEM;
+       *pagep = page;
+--- kernel-maemo-2.6.28.test.orig/fs/nfs/read.c
++++ kernel-maemo-2.6.28.test/fs/nfs/read.c
+@@ -533,12 +533,6 @@
+       unsigned int len;
+       int error;
+-      error = nfs_wb_page(inode, page);
+-      if (error)
+-              goto out_unlock;
+-      if (PageUptodate(page))
+-              goto out_unlock;
+-
+       len = nfs_page_length(page);
+       if (len == 0)
+               return nfs_return_empty_page(page);
+--- kernel-maemo-2.6.28.test.orig/fs/nfsctl.c
++++ kernel-maemo-2.6.28.test/fs/nfsctl.c
+@@ -82,8 +82,8 @@
+       },
+ };
+-long
+-asmlinkage sys_nfsservctl(int cmd, struct nfsctl_arg __user *arg, void __user *res)
++SYSCALL_DEFINE3(nfsservctl, int, cmd, struct nfsctl_arg __user *, arg,
++              void __user *, res)
+ {
+       struct file *file;
+       void __user *p = &arg->u;
+--- kernel-maemo-2.6.28.test.orig/fs/nfsd/nfs4state.c
++++ kernel-maemo-2.6.28.test/fs/nfsd/nfs4state.c
+@@ -2769,6 +2769,25 @@
+ }
+ /*
++ * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
++ * so we do a temporary open here just to get an open file to pass to
++ * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
++ * inode operation.)
++ */
++static int nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
++{
++      struct file *file;
++      int err;
++
++      err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
++      if (err)
++              return err;
++      err = vfs_test_lock(file, lock);
++      nfsd_close(file);
++      return err;
++}
++
++/*
+  * LOCKT operation
+  */
+ __be32
+@@ -2776,7 +2795,6 @@
+           struct nfsd4_lockt *lockt)
+ {
+       struct inode *inode;
+-      struct file file;
+       struct file_lock file_lock;
+       int error;
+       __be32 status;
+@@ -2824,7 +2842,6 @@
+               file_lock.fl_owner = (fl_owner_t)lockt->lt_stateowner;
+       file_lock.fl_pid = current->tgid;
+       file_lock.fl_flags = FL_POSIX;
+-      file_lock.fl_lmops = &nfsd_posix_mng_ops;
+       file_lock.fl_start = lockt->lt_offset;
+       if ((lockt->lt_length == ~(u64)0) || LOFF_OVERFLOW(lockt->lt_offset, lockt->lt_length))
+@@ -2834,16 +2851,8 @@
+       nfs4_transform_lock_offset(&file_lock);
+-      /* vfs_test_lock uses the struct file _only_ to resolve the inode.
+-       * since LOCKT doesn't require an OPEN, and therefore a struct
+-       * file may not exist, pass vfs_test_lock a struct file with
+-       * only the dentry:inode set.
+-       */
+-      memset(&file, 0, sizeof (struct file));
+-      file.f_path.dentry = cstate->current_fh.fh_dentry;
+-
+       status = nfs_ok;
+-      error = vfs_test_lock(&file, &file_lock);
++      error = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
+       if (error) {
+               status = nfserrno(error);
+               goto out;
+--- kernel-maemo-2.6.28.test.orig/fs/nfsd/nfs4xdr.c
++++ kernel-maemo-2.6.28.test/fs/nfsd/nfs4xdr.c
+@@ -2598,6 +2598,7 @@
+       [OP_LOOKUPP]            = (nfsd4_enc)nfsd4_encode_noop,
+       [OP_NVERIFY]            = (nfsd4_enc)nfsd4_encode_noop,
+       [OP_OPEN]               = (nfsd4_enc)nfsd4_encode_open,
++      [OP_OPENATTR]           = (nfsd4_enc)nfsd4_encode_noop,
+       [OP_OPEN_CONFIRM]       = (nfsd4_enc)nfsd4_encode_open_confirm,
+       [OP_OPEN_DOWNGRADE]     = (nfsd4_enc)nfsd4_encode_open_downgrade,
+       [OP_PUTFH]              = (nfsd4_enc)nfsd4_encode_noop,
+--- kernel-maemo-2.6.28.test.orig/fs/ocfs2/file.c
++++ kernel-maemo-2.6.28.test/fs/ocfs2/file.c
+@@ -1943,7 +1943,7 @@
+                  out->f_path.dentry->d_name.len,
+                  out->f_path.dentry->d_name.name);
+-      inode_double_lock(inode, pipe->inode);
++      mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
+       ret = ocfs2_rw_lock(inode, 1);
+       if (ret < 0) {
+@@ -1958,12 +1958,16 @@
+               goto out_unlock;
+       }
++      if (pipe->inode)
++              mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
+       ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags);
++      if (pipe->inode)
++              mutex_unlock(&pipe->inode->i_mutex);
+ out_unlock:
+       ocfs2_rw_unlock(inode, 1);
+ out:
+-      inode_double_unlock(inode, pipe->inode);
++      mutex_unlock(&inode->i_mutex);
+       mlog_exit(ret);
+       return ret;
+--- kernel-maemo-2.6.28.test.orig/fs/ocfs2/journal.h
++++ kernel-maemo-2.6.28.test/fs/ocfs2/journal.h
+@@ -445,8 +445,10 @@
+ static inline int ocfs2_begin_ordered_truncate(struct inode *inode,
+                                              loff_t new_size)
+ {
+-      return jbd2_journal_begin_ordered_truncate(&OCFS2_I(inode)->ip_jinode,
+-                                                 new_size);
++      return jbd2_journal_begin_ordered_truncate(
++                              OCFS2_SB(inode->i_sb)->journal->j_journal,
++                              &OCFS2_I(inode)->ip_jinode,
++                              new_size);
+ }
+ #endif /* OCFS2_JOURNAL_H */
+--- kernel-maemo-2.6.28.test.orig/fs/open.c
++++ kernel-maemo-2.6.28.test/fs/open.c
+@@ -122,7 +122,7 @@
+       return 0;
+ }
+-asmlinkage long sys_statfs(const char __user *pathname, struct statfs __user * buf)
++SYSCALL_DEFINE2(statfs, const char __user *, pathname, struct statfs __user *, buf)
+ {
+       struct path path;
+       int error;
+@@ -138,8 +138,7 @@
+       return error;
+ }
+-
+-asmlinkage long sys_statfs64(const char __user *pathname, size_t sz, struct statfs64 __user *buf)
++SYSCALL_DEFINE3(statfs64, const char __user *, pathname, size_t, sz, struct statfs64 __user *, buf)
+ {
+       struct path path;
+       long error;
+@@ -157,8 +156,7 @@
+       return error;
+ }
+-
+-asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user * buf)
++SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct statfs __user *, buf)
+ {
+       struct file * file;
+       struct statfs tmp;
+@@ -176,7 +174,7 @@
+       return error;
+ }
+-asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz, struct statfs64 __user *buf)
++SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, size_t, sz, struct statfs64 __user *, buf)
+ {
+       struct file * file;
+       struct statfs64 tmp;
+@@ -287,7 +285,7 @@
+       return error;
+ }
+-asmlinkage long sys_truncate(const char __user * path, unsigned long length)
++SYSCALL_DEFINE2(truncate, const char __user *, path, unsigned long, length)
+ {
+       /* on 32-bit boxen it will cut the range 2^31--2^32-1 off */
+       return do_sys_truncate(path, (long)length);
+@@ -336,7 +334,7 @@
+       return error;
+ }
+-asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length)
++SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length)
+ {
+       long ret = do_sys_ftruncate(fd, length, 1);
+       /* avoid REGPARM breakage on x86: */
+@@ -346,21 +344,35 @@
+ /* LFS versions of truncate are only needed on 32 bit machines */
+ #if BITS_PER_LONG == 32
+-asmlinkage long sys_truncate64(const char __user * path, loff_t length)
++SYSCALL_DEFINE(truncate64)(const char __user * path, loff_t length)
+ {
+       return do_sys_truncate(path, length);
+ }
++#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
++asmlinkage long SyS_truncate64(long path, loff_t length)
++{
++      return SYSC_truncate64((const char __user *) path, length);
++}
++SYSCALL_ALIAS(sys_truncate64, SyS_truncate64);
++#endif
+-asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length)
++SYSCALL_DEFINE(ftruncate64)(unsigned int fd, loff_t length)
+ {
+       long ret = do_sys_ftruncate(fd, length, 0);
+       /* avoid REGPARM breakage on x86: */
+       asmlinkage_protect(2, ret, fd, length);
+       return ret;
+ }
++#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
++asmlinkage long SyS_ftruncate64(long fd, loff_t length)
++{
++      return SYSC_ftruncate64((unsigned int) fd, length);
++}
++SYSCALL_ALIAS(sys_ftruncate64, SyS_ftruncate64);
+ #endif
++#endif /* BITS_PER_LONG == 32 */
+-asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len)
++SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len)
+ {
+       struct file *file;
+       struct inode *inode;
+@@ -417,13 +429,20 @@
+ out:
+       return ret;
+ }
++#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
++asmlinkage long SyS_fallocate(long fd, long mode, loff_t offset, loff_t len)
++{
++      return SYSC_fallocate((int)fd, (int)mode, offset, len);
++}
++SYSCALL_ALIAS(sys_fallocate, SyS_fallocate);
++#endif
+ /*
+  * access() needs to use the real uid/gid, not the effective uid/gid.
+  * We do this by temporarily clearing all FS-related capabilities and
+  * switching the fsuid/fsgid around to the real ones.
+  */
+-asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode)
++SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
+ {
+       struct path path;
+       struct inode *inode;
+@@ -503,12 +522,12 @@
+       return res;
+ }
+-asmlinkage long sys_access(const char __user *filename, int mode)
++SYSCALL_DEFINE2(access, const char __user *, filename, int, mode)
+ {
+       return sys_faccessat(AT_FDCWD, filename, mode);
+ }
+-asmlinkage long sys_chdir(const char __user * filename)
++SYSCALL_DEFINE1(chdir, const char __user *, filename)
+ {
+       struct path path;
+       int error;
+@@ -529,7 +548,7 @@
+       return error;
+ }
+-asmlinkage long sys_fchdir(unsigned int fd)
++SYSCALL_DEFINE1(fchdir, unsigned int, fd)
+ {
+       struct file *file;
+       struct inode *inode;
+@@ -555,7 +574,7 @@
+       return error;
+ }
+-asmlinkage long sys_chroot(const char __user * filename)
++SYSCALL_DEFINE1(chroot, const char __user *, filename)
+ {
+       struct path path;
+       int error;
+@@ -580,7 +599,7 @@
+       return error;
+ }
+-asmlinkage long sys_fchmod(unsigned int fd, mode_t mode)
++SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
+ {
+       struct inode * inode;
+       struct dentry * dentry;
+@@ -614,8 +633,7 @@
+       return err;
+ }
+-asmlinkage long sys_fchmodat(int dfd, const char __user *filename,
+-                           mode_t mode)
++SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
+ {
+       struct path path;
+       struct inode *inode;
+@@ -644,7 +662,7 @@
+       return error;
+ }
+-asmlinkage long sys_chmod(const char __user *filename, mode_t mode)
++SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
+ {
+       return sys_fchmodat(AT_FDCWD, filename, mode);
+ }
+@@ -674,7 +692,7 @@
+       return error;
+ }
+-asmlinkage long sys_chown(const char __user * filename, uid_t user, gid_t group)
++SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
+ {
+       struct path path;
+       int error;
+@@ -693,8 +711,8 @@
+       return error;
+ }
+-asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
+-                           gid_t group, int flag)
++SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
++              gid_t, group, int, flag)
+ {
+       struct path path;
+       int error = -EINVAL;
+@@ -718,7 +736,7 @@
+       return error;
+ }
+-asmlinkage long sys_lchown(const char __user * filename, uid_t user, gid_t group)
++SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group)
+ {
+       struct path path;
+       int error;
+@@ -737,8 +755,7 @@
+       return error;
+ }
+-
+-asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group)
++SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
+ {
+       struct file * file;
+       int error = -EBADF;
+@@ -1029,7 +1046,7 @@
+       return fd;
+ }
+-asmlinkage long sys_open(const char __user *filename, int flags, int mode)
++SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, int, mode)
+ {
+       long ret;
+@@ -1042,8 +1059,8 @@
+       return ret;
+ }
+-asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
+-                         int mode)
++SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags,
++              int, mode)
+ {
+       long ret;
+@@ -1062,7 +1079,7 @@
+  * For backward compatibility?  Maybe this should be moved
+  * into arch/i386 instead?
+  */
+-asmlinkage long sys_creat(const char __user * pathname, int mode)
++SYSCALL_DEFINE2(creat, const char __user *, pathname, int, mode)
+ {
+       return sys_open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode);
+ }
+@@ -1098,7 +1115,7 @@
+  * releasing the fd. This ensures that one clone task can't release
+  * an fd while another clone is opening it.
+  */
+-asmlinkage long sys_close(unsigned int fd)
++SYSCALL_DEFINE1(close, unsigned int, fd)
+ {
+       struct file * filp;
+       struct files_struct *files = current->files;
+@@ -1131,14 +1148,13 @@
+       spin_unlock(&files->file_lock);
+       return -EBADF;
+ }
+-
+ EXPORT_SYMBOL(sys_close);
+ /*
+  * This routine simulates a hangup on the tty, to arrange that users
+  * are given clean terminals at login time.
+  */
+-asmlinkage long sys_vhangup(void)
++SYSCALL_DEFINE0(vhangup)
+ {
+       if (capable(CAP_SYS_TTY_CONFIG)) {
+               tty_vhangup_self();
+--- kernel-maemo-2.6.28.test.orig/fs/pipe.c
++++ kernel-maemo-2.6.28.test/fs/pipe.c
+@@ -699,12 +699,12 @@
+       int retval;
+       mutex_lock(&inode->i_mutex);
+-
+       retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
+-
+-      if (retval >= 0)
++      if (retval >= 0) {
+               retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
+-
++              if (retval < 0) /* this can happen only if on == T */
++                      fasync_helper(-1, filp, 0, &pipe->fasync_readers);
++      }
+       mutex_unlock(&inode->i_mutex);
+       if (retval < 0)
+@@ -1048,7 +1048,7 @@
+  * sys_pipe() is the normal C calling standard for creating
+  * a pipe. It's not the way Unix traditionally does this, though.
+  */
+-asmlinkage long __weak sys_pipe2(int __user *fildes, int flags)
++SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
+ {
+       int fd[2];
+       int error;
+@@ -1064,7 +1064,7 @@
+       return error;
+ }
+-asmlinkage long __weak sys_pipe(int __user *fildes)
++SYSCALL_DEFINE1(pipe, int __user *, fildes)
+ {
+       return sys_pipe2(fildes, 0);
+ }
+--- kernel-maemo-2.6.28.test.orig/fs/proc/base.c
++++ kernel-maemo-2.6.28.test/fs/proc/base.c
+@@ -148,15 +148,22 @@
+       return count;
+ }
+-static struct fs_struct *get_fs_struct(struct task_struct *task)
++static int get_fs_path(struct task_struct *task, struct path *path, bool root)
+ {
+       struct fs_struct *fs;
++      int result = -ENOENT;
++
+       task_lock(task);
+       fs = task->fs;
+-      if(fs)
+-              atomic_inc(&fs->count);
++      if (fs) {
++              read_lock(&fs->lock);
++              *path = root ? fs->root : fs->pwd;
++              path_get(path);
++              read_unlock(&fs->lock);
++              result = 0;
++      }
+       task_unlock(task);
+-      return fs;
++      return result;
+ }
+ static int get_nr_threads(struct task_struct *tsk)
+@@ -174,42 +181,24 @@
+ static int proc_cwd_link(struct inode *inode, struct path *path)
+ {
+       struct task_struct *task = get_proc_task(inode);
+-      struct fs_struct *fs = NULL;
+       int result = -ENOENT;
+       if (task) {
+-              fs = get_fs_struct(task);
++              result = get_fs_path(task, path, 0);
+               put_task_struct(task);
+       }
+-      if (fs) {
+-              read_lock(&fs->lock);
+-              *path = fs->pwd;
+-              path_get(&fs->pwd);
+-              read_unlock(&fs->lock);
+-              result = 0;
+-              put_fs_struct(fs);
+-      }
+       return result;
+ }
+ static int proc_root_link(struct inode *inode, struct path *path)
+ {
+       struct task_struct *task = get_proc_task(inode);
+-      struct fs_struct *fs = NULL;
+       int result = -ENOENT;
+       if (task) {
+-              fs = get_fs_struct(task);
++              result = get_fs_path(task, path, 1);
+               put_task_struct(task);
+       }
+-      if (fs) {
+-              read_lock(&fs->lock);
+-              *path = fs->root;
+-              path_get(&fs->root);
+-              read_unlock(&fs->lock);
+-              result = 0;
+-              put_fs_struct(fs);
+-      }
+       return result;
+ }
+@@ -567,7 +556,6 @@
+       struct task_struct *task = get_proc_task(inode);
+       struct nsproxy *nsp;
+       struct mnt_namespace *ns = NULL;
+-      struct fs_struct *fs = NULL;
+       struct path root;
+       struct proc_mounts *p;
+       int ret = -EINVAL;
+@@ -581,22 +569,16 @@
+                               get_mnt_ns(ns);
+               }
+               rcu_read_unlock();
+-              if (ns)
+-                      fs = get_fs_struct(task);
++              if (ns && get_fs_path(task, &root, 1) == 0)
++                      ret = 0;
+               put_task_struct(task);
+       }
+       if (!ns)
+               goto err;
+-      if (!fs)
++      if (ret)
+               goto err_put_ns;
+-      read_lock(&fs->lock);
+-      root = fs->root;
+-      path_get(&root);
+-      read_unlock(&fs->lock);
+-      put_fs_struct(fs);
+-
+       ret = -ENOMEM;
+       p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
+       if (!p)
+--- kernel-maemo-2.6.28.test.orig/fs/proc/page.c
++++ kernel-maemo-2.6.28.test/fs/proc/page.c
+@@ -80,7 +80,7 @@
+ #define KPF_RECLAIM    9
+ #define KPF_BUDDY     10
+-#define kpf_copy_bit(flags, srcpos, dstpos) (((flags >> srcpos) & 1) << dstpos)
++#define kpf_copy_bit(flags, dstpos, srcpos) (((flags >> srcpos) & 1) << dstpos)
+ static ssize_t kpageflags_read(struct file *file, char __user *buf,
+                            size_t count, loff_t *ppos)
+@@ -107,7 +107,7 @@
+               else
+                       kflags = ppage->flags;
+-              uflags = kpf_copy_bit(KPF_LOCKED, PG_locked, kflags) |
++              uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) |
+                       kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
+                       kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
+                       kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
+--- kernel-maemo-2.6.28.test.orig/fs/quota.c
++++ kernel-maemo-2.6.28.test/fs/quota.c
+@@ -368,7 +368,8 @@
+  * calls. Maybe we need to add the process quotas etc. in the future,
+  * but we probably should use rlimits for that.
+  */
+-asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, qid_t id, void __user *addr)
++SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
++              qid_t, id, void __user *, addr)
+ {
+       uint cmds, type;
+       struct super_block *sb = NULL;
+--- kernel-maemo-2.6.28.test.orig/fs/read_write.c
++++ kernel-maemo-2.6.28.test/fs/read_write.c
+@@ -134,7 +134,7 @@
+ }
+ EXPORT_SYMBOL(vfs_llseek);
+-asmlinkage off_t sys_lseek(unsigned int fd, off_t offset, unsigned int origin)
++SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, origin)
+ {
+       off_t retval;
+       struct file * file;
+@@ -158,9 +158,9 @@
+ }
+ #ifdef __ARCH_WANT_SYS_LLSEEK
+-asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
+-                         unsigned long offset_low, loff_t __user * result,
+-                         unsigned int origin)
++SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
++              unsigned long, offset_low, loff_t __user *, result,
++              unsigned int, origin)
+ {
+       int retval;
+       struct file * file;
+@@ -356,7 +356,7 @@
+       file->f_pos = pos;
+ }
+-asmlinkage ssize_t sys_read(unsigned int fd, char __user * buf, size_t count)
++SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
+ {
+       struct file *file;
+       ssize_t ret = -EBADF;
+@@ -373,7 +373,8 @@
+       return ret;
+ }
+-asmlinkage ssize_t sys_write(unsigned int fd, const char __user * buf, size_t count)
++SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
++              size_t, count)
+ {
+       struct file *file;
+       ssize_t ret = -EBADF;
+@@ -390,8 +391,8 @@
+       return ret;
+ }
+-asmlinkage ssize_t sys_pread64(unsigned int fd, char __user *buf,
+-                           size_t count, loff_t pos)
++SYSCALL_DEFINE(pread64)(unsigned int fd, char __user *buf,
++                      size_t count, loff_t pos)
+ {
+       struct file *file;
+       ssize_t ret = -EBADF;
+@@ -410,9 +411,17 @@
+       return ret;
+ }
++#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
++asmlinkage long SyS_pread64(long fd, long buf, long count, loff_t pos)
++{
++      return SYSC_pread64((unsigned int) fd, (char __user *) buf,
++                          (size_t) count, pos);
++}
++SYSCALL_ALIAS(sys_pread64, SyS_pread64);
++#endif
+-asmlinkage ssize_t sys_pwrite64(unsigned int fd, const char __user *buf,
+-                            size_t count, loff_t pos)
++SYSCALL_DEFINE(pwrite64)(unsigned int fd, const char __user *buf,
++                       size_t count, loff_t pos)
+ {
+       struct file *file;
+       ssize_t ret = -EBADF;
+@@ -431,6 +440,14 @@
+       return ret;
+ }
++#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
++asmlinkage long SyS_pwrite64(long fd, long buf, long count, loff_t pos)
++{
++      return SYSC_pwrite64((unsigned int) fd, (const char __user *) buf,
++                           (size_t) count, pos);
++}
++SYSCALL_ALIAS(sys_pwrite64, SyS_pwrite64);
++#endif
+ /*
+  * Reduce an iovec's length in-place.  Return the resulting number of segments
+@@ -659,8 +676,8 @@
+ EXPORT_SYMBOL(vfs_writev);
+-asmlinkage ssize_t
+-sys_readv(unsigned long fd, const struct iovec __user *vec, unsigned long vlen)
++SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
++              unsigned long, vlen)
+ {
+       struct file *file;
+       ssize_t ret = -EBADF;
+@@ -680,8 +697,8 @@
+       return ret;
+ }
+-asmlinkage ssize_t
+-sys_writev(unsigned long fd, const struct iovec __user *vec, unsigned long vlen)
++SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
++              unsigned long, vlen)
+ {
+       struct file *file;
+       ssize_t ret = -EBADF;
+@@ -799,7 +816,7 @@
+       return retval;
+ }
+-asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t __user *offset, size_t count)
++SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
+ {
+       loff_t pos;
+       off_t off;
+@@ -818,7 +835,7 @@
+       return do_sendfile(out_fd, in_fd, NULL, count, 0);
+ }
+-asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd, loff_t __user *offset, size_t count)
++SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
+ {
+       loff_t pos;
+       ssize_t ret;
+--- kernel-maemo-2.6.28.test.orig/fs/readdir.c
++++ kernel-maemo-2.6.28.test/fs/readdir.c
+@@ -102,7 +102,8 @@
+       return -EFAULT;
+ }
+-asmlinkage long old_readdir(unsigned int fd, struct old_linux_dirent __user * dirent, unsigned int count)
++SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
++              struct old_linux_dirent __user *, dirent, unsigned int, count)
+ {
+       int error;
+       struct file * file;
+@@ -187,7 +188,8 @@
+       return -EFAULT;
+ }
+-asmlinkage long sys_getdents(unsigned int fd, struct linux_dirent __user * dirent, unsigned int count)
++SYSCALL_DEFINE3(getdents, unsigned int, fd,
++              struct linux_dirent __user *, dirent, unsigned int, count)
+ {
+       struct file * file;
+       struct linux_dirent __user * lastdirent;
+@@ -268,7 +270,8 @@
+       return -EFAULT;
+ }
+-asmlinkage long sys_getdents64(unsigned int fd, struct linux_dirent64 __user * dirent, unsigned int count)
++SYSCALL_DEFINE3(getdents64, unsigned int, fd,
++              struct linux_dirent64 __user *, dirent, unsigned int, count)
+ {
+       struct file * file;
+       struct linux_dirent64 __user * lastdirent;
+--- kernel-maemo-2.6.28.test.orig/fs/reiserfs/inode.c
++++ kernel-maemo-2.6.28.test/fs/reiserfs/inode.c
+@@ -2556,7 +2556,7 @@
+       }
+       index = pos >> PAGE_CACHE_SHIFT;
+-      page = __grab_cache_page(mapping, index);
++      page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page)
+               return -ENOMEM;
+       *pagep = page;
+--- kernel-maemo-2.6.28.test.orig/fs/select.c
++++ kernel-maemo-2.6.28.test/fs/select.c
+@@ -507,8 +507,8 @@
+       return ret;
+ }
+-asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
+-                      fd_set __user *exp, struct timeval __user *tvp)
++SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
++              fd_set __user *, exp, struct timeval __user *, tvp)
+ {
+       struct timespec end_time, *to = NULL;
+       struct timeval tv;
+@@ -532,9 +532,9 @@
+ }
+ #ifdef HAVE_SET_RESTORE_SIGMASK
+-asmlinkage long sys_pselect7(int n, fd_set __user *inp, fd_set __user *outp,
+-              fd_set __user *exp, struct timespec __user *tsp,
+-              const sigset_t __user *sigmask, size_t sigsetsize)
++static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
++                     fd_set __user *exp, struct timespec __user *tsp,
++                     const sigset_t __user *sigmask, size_t sigsetsize)
+ {
+       sigset_t ksigmask, sigsaved;
+       struct timespec ts, end_time, *to = NULL;
+@@ -560,7 +560,7 @@
+               sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
+       }
+-      ret = core_sys_select(n, inp, outp, exp, &end_time);
++      ret = core_sys_select(n, inp, outp, exp, to);
+       ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);
+       if (ret == -ERESTARTNOHAND) {
+@@ -586,8 +586,9 @@
+  * which has a pointer to the sigset_t itself followed by a size_t containing
+  * the sigset size.
+  */
+-asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
+-      fd_set __user *exp, struct timespec __user *tsp, void __user *sig)
++SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
++              fd_set __user *, exp, struct timespec __user *, tsp,
++              void __user *, sig)
+ {
+       size_t sigsetsize = 0;
+       sigset_t __user *up = NULL;
+@@ -600,7 +601,7 @@
+                       return -EFAULT;
+       }
+-      return sys_pselect7(n, inp, outp, exp, tsp, up, sigsetsize);
++      return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
+ }
+ #endif /* HAVE_SET_RESTORE_SIGMASK */
+@@ -806,8 +807,8 @@
+       return ret;
+ }
+-asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
+-                      long timeout_msecs)
++SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
++              long, timeout_msecs)
+ {
+       struct timespec end_time, *to = NULL;
+       int ret;
+@@ -841,9 +842,9 @@
+ }
+ #ifdef HAVE_SET_RESTORE_SIGMASK
+-asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
+-      struct timespec __user *tsp, const sigset_t __user *sigmask,
+-      size_t sigsetsize)
++SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
++              struct timespec __user *, tsp, const sigset_t __user *, sigmask,
++              size_t, sigsetsize)
+ {
+       sigset_t ksigmask, sigsaved;
+       struct timespec ts, end_time, *to = NULL;
+--- kernel-maemo-2.6.28.test.orig/fs/seq_file.c
++++ kernel-maemo-2.6.28.test/fs/seq_file.c
+@@ -48,12 +48,78 @@
+        */
+       file->f_version = 0;
+-      /* SEQ files support lseek, but not pread/pwrite */
+-      file->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
++      /*
++       * seq_files support lseek() and pread().  They do not implement
++       * write() at all, but we clear FMODE_PWRITE here for historical
++       * reasons.
++       *
++       * If a client of seq_files a) implements file.write() and b) wishes to
++       * support pwrite() then that client will need to implement its own
++       * file.open() which calls seq_open() and then sets FMODE_PWRITE.
++       */
++      file->f_mode &= ~FMODE_PWRITE;
+       return 0;
+ }
+ EXPORT_SYMBOL(seq_open);
++static int traverse(struct seq_file *m, loff_t offset)
++{
++      loff_t pos = 0, index;
++      int error = 0;
++      void *p;
++
++      m->version = 0;
++      index = 0;
++      m->count = m->from = 0;
++      if (!offset) {
++              m->index = index;
++              return 0;
++      }
++      if (!m->buf) {
++              m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
++              if (!m->buf)
++                      return -ENOMEM;
++      }
++      p = m->op->start(m, &index);
++      while (p) {
++              error = PTR_ERR(p);
++              if (IS_ERR(p))
++                      break;
++              error = m->op->show(m, p);
++              if (error < 0)
++                      break;
++              if (unlikely(error)) {
++                      error = 0;
++                      m->count = 0;
++              }
++              if (m->count == m->size)
++                      goto Eoverflow;
++              if (pos + m->count > offset) {
++                      m->from = offset - pos;
++                      m->count -= m->from;
++                      m->index = index;
++                      break;
++              }
++              pos += m->count;
++              m->count = 0;
++              if (pos == offset) {
++                      index++;
++                      m->index = index;
++                      break;
++              }
++              p = m->op->next(m, p, &index);
++      }
++      m->op->stop(m, p);
++      m->index = index;
++      return error;
++
++Eoverflow:
++      m->op->stop(m, p);
++      kfree(m->buf);
++      m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
++      return !m->buf ? -ENOMEM : -EAGAIN;
++}
++
+ /**
+  *    seq_read -      ->read() method for sequential files.
+  *    @file: the file to read from
+@@ -73,6 +139,22 @@
+       int err = 0;
+       mutex_lock(&m->lock);
++
++      /* Don't assume *ppos is where we left it */
++      if (unlikely(*ppos != m->read_pos)) {
++              m->read_pos = *ppos;
++              while ((err = traverse(m, *ppos)) == -EAGAIN)
++                      ;
++              if (err) {
++                      /* With prejudice... */
++                      m->read_pos = 0;
++                      m->version = 0;
++                      m->index = 0;
++                      m->count = 0;
++                      goto Done;
++              }
++      }
++
+       /*
+        * seq_file->op->..m_start/m_stop/m_next may do special actions
+        * or optimisations based on the file->f_version, so we want to
+@@ -172,8 +254,10 @@
+ Done:
+       if (!copied)
+               copied = err;
+-      else
++      else {
+               *ppos += copied;
++              m->read_pos += copied;
++      }
+       file->f_version = m->version;
+       mutex_unlock(&m->lock);
+       return copied;
+@@ -186,63 +270,6 @@
+ }
+ EXPORT_SYMBOL(seq_read);
+-static int traverse(struct seq_file *m, loff_t offset)
+-{
+-      loff_t pos = 0, index;
+-      int error = 0;
+-      void *p;
+-
+-      m->version = 0;
+-      index = 0;
+-      m->count = m->from = 0;
+-      if (!offset) {
+-              m->index = index;
+-              return 0;
+-      }
+-      if (!m->buf) {
+-              m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+-              if (!m->buf)
+-                      return -ENOMEM;
+-      }
+-      p = m->op->start(m, &index);
+-      while (p) {
+-              error = PTR_ERR(p);
+-              if (IS_ERR(p))
+-                      break;
+-              error = m->op->show(m, p);
+-              if (error < 0)
+-                      break;
+-              if (unlikely(error)) {
+-                      error = 0;
+-                      m->count = 0;
+-              }
+-              if (m->count == m->size)
+-                      goto Eoverflow;
+-              if (pos + m->count > offset) {
+-                      m->from = offset - pos;
+-                      m->count -= m->from;
+-                      m->index = index;
+-                      break;
+-              }
+-              pos += m->count;
+-              m->count = 0;
+-              if (pos == offset) {
+-                      index++;
+-                      m->index = index;
+-                      break;
+-              }
+-              p = m->op->next(m, p, &index);
+-      }
+-      m->op->stop(m, p);
+-      return error;
+-
+-Eoverflow:
+-      m->op->stop(m, p);
+-      kfree(m->buf);
+-      m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+-      return !m->buf ? -ENOMEM : -EAGAIN;
+-}
+-
+ /**
+  *    seq_lseek -     ->llseek() method for sequential files.
+  *    @file: the file in question
+@@ -265,16 +292,18 @@
+                       if (offset < 0)
+                               break;
+                       retval = offset;
+-                      if (offset != file->f_pos) {
++                      if (offset != m->read_pos) {
+                               while ((retval=traverse(m, offset)) == -EAGAIN)
+                                       ;
+                               if (retval) {
+                                       /* with extreme prejudice... */
+                                       file->f_pos = 0;
++                                      m->read_pos = 0;
+                                       m->version = 0;
+                                       m->index = 0;
+                                       m->count = 0;
+                               } else {
++                                      m->read_pos = offset;
+                                       retval = file->f_pos = offset;
+                               }
+                       }
+--- kernel-maemo-2.6.28.test.orig/fs/signalfd.c
++++ kernel-maemo-2.6.28.test/fs/signalfd.c
+@@ -205,8 +205,8 @@
+       .read           = signalfd_read,
+ };
+-asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask,
+-                            size_t sizemask, int flags)
++SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
++              size_t, sizemask, int, flags)
+ {
+       sigset_t sigmask;
+       struct signalfd_ctx *ctx;
+@@ -259,8 +259,8 @@
+       return ufd;
+ }
+-asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask,
+-                           size_t sizemask)
++SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
++              size_t, sizemask)
+ {
+       return sys_signalfd4(ufd, user_mask, sizemask, 0);
+ }
+--- kernel-maemo-2.6.28.test.orig/fs/smbfs/file.c
++++ kernel-maemo-2.6.28.test/fs/smbfs/file.c
+@@ -297,7 +297,7 @@
+                       struct page **pagep, void **fsdata)
+ {
+       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+-      *pagep = __grab_cache_page(mapping, index);
++      *pagep = grab_cache_page_write_begin(mapping, index, flags);
+       if (!*pagep)
+               return -ENOMEM;
+       return 0;
+--- kernel-maemo-2.6.28.test.orig/fs/splice.c
++++ kernel-maemo-2.6.28.test/fs/splice.c
+@@ -735,10 +735,19 @@
+        * ->write_end. Most of the time, these expect i_mutex to
+        * be held. Since this may result in an ABBA deadlock with
+        * pipe->inode, we have to order lock acquiry here.
++       *
++       * Outer lock must be inode->i_mutex, as pipe_wait() will
++       * release and reacquire pipe->inode->i_mutex, AND inode must
++       * never be a pipe.
+        */
+-      inode_double_lock(inode, pipe->inode);
++      WARN_ON(S_ISFIFO(inode->i_mode));
++      mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
++      if (pipe->inode)
++              mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
+       ret = __splice_from_pipe(pipe, &sd, actor);
+-      inode_double_unlock(inode, pipe->inode);
++      if (pipe->inode)
++              mutex_unlock(&pipe->inode->i_mutex);
++      mutex_unlock(&inode->i_mutex);
+       return ret;
+ }
+@@ -829,11 +838,17 @@
+       };
+       ssize_t ret;
+-      inode_double_lock(inode, pipe->inode);
++      WARN_ON(S_ISFIFO(inode->i_mode));
++      mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
+       ret = file_remove_suid(out);
+-      if (likely(!ret))
++      if (likely(!ret)) {
++              if (pipe->inode)
++                      mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
+               ret = __splice_from_pipe(pipe, &sd, pipe_to_file);
+-      inode_double_unlock(inode, pipe->inode);
++              if (pipe->inode)
++                      mutex_unlock(&pipe->inode->i_mutex);
++      }
++      mutex_unlock(&inode->i_mutex);
+       if (ret > 0) {
+               unsigned long nr_pages;
+@@ -1436,8 +1451,8 @@
+  * Currently we punt and implement it as a normal copy, see pipe_to_user().
+  *
+  */
+-asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
+-                           unsigned long nr_segs, unsigned int flags)
++SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov,
++              unsigned long, nr_segs, unsigned int, flags)
+ {
+       struct file *file;
+       long error;
+@@ -1462,9 +1477,9 @@
+       return error;
+ }
+-asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
+-                         int fd_out, loff_t __user *off_out,
+-                         size_t len, unsigned int flags)
++SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
++              int, fd_out, loff_t __user *, off_out,
++              size_t, len, unsigned int, flags)
+ {
+       long error;
+       struct file *in, *out;
+@@ -1686,7 +1701,7 @@
+       return ret;
+ }
+-asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
++SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
+ {
+       struct file *in;
+       int error, fput_in;
+--- kernel-maemo-2.6.28.test.orig/fs/stat.c
++++ kernel-maemo-2.6.28.test/fs/stat.c
+@@ -152,7 +152,7 @@
+       return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
+ }
+-asmlinkage long sys_stat(char __user * filename, struct __old_kernel_stat __user * statbuf)
++SYSCALL_DEFINE2(stat, char __user *, filename, struct __old_kernel_stat __user *, statbuf)
+ {
+       struct kstat stat;
+       int error = vfs_stat_fd(AT_FDCWD, filename, &stat);
+@@ -162,7 +162,8 @@
+       return error;
+ }
+-asmlinkage long sys_lstat(char __user * filename, struct __old_kernel_stat __user * statbuf)
++
++SYSCALL_DEFINE2(lstat, char __user *, filename, struct __old_kernel_stat __user *, statbuf)
+ {
+       struct kstat stat;
+       int error = vfs_lstat_fd(AT_FDCWD, filename, &stat);
+@@ -172,7 +173,8 @@
+       return error;
+ }
+-asmlinkage long sys_fstat(unsigned int fd, struct __old_kernel_stat __user * statbuf)
++
++SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
+ {
+       struct kstat stat;
+       int error = vfs_fstat(fd, &stat);
+@@ -235,7 +237,7 @@
+       return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
+ }
+-asmlinkage long sys_newstat(char __user *filename, struct stat __user *statbuf)
++SYSCALL_DEFINE2(newstat, char __user *, filename, struct stat __user *, statbuf)
+ {
+       struct kstat stat;
+       int error = vfs_stat_fd(AT_FDCWD, filename, &stat);
+@@ -246,7 +248,7 @@
+       return error;
+ }
+-asmlinkage long sys_newlstat(char __user *filename, struct stat __user *statbuf)
++SYSCALL_DEFINE2(newlstat, char __user *, filename, struct stat __user *, statbuf)
+ {
+       struct kstat stat;
+       int error = vfs_lstat_fd(AT_FDCWD, filename, &stat);
+@@ -258,8 +260,8 @@
+ }
+ #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
+-asmlinkage long sys_newfstatat(int dfd, char __user *filename,
+-                              struct stat __user *statbuf, int flag)
++SYSCALL_DEFINE4(newfstatat, int, dfd, char __user *, filename,
++              struct stat __user *, statbuf, int, flag)
+ {
+       struct kstat stat;
+       int error = -EINVAL;
+@@ -280,7 +282,7 @@
+ }
+ #endif
+-asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf)
++SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
+ {
+       struct kstat stat;
+       int error = vfs_fstat(fd, &stat);
+@@ -291,8 +293,8 @@
+       return error;
+ }
+-asmlinkage long sys_readlinkat(int dfd, const char __user *pathname,
+-                              char __user *buf, int bufsiz)
++SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
++              char __user *, buf, int, bufsiz)
+ {
+       struct path path;
+       int error;
+@@ -318,8 +320,8 @@
+       return error;
+ }
+-asmlinkage long sys_readlink(const char __user *path, char __user *buf,
+-                              int bufsiz)
++SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
++              int, bufsiz)
+ {
+       return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
+ }
+@@ -365,7 +367,7 @@
+       return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
+ }
+-asmlinkage long sys_stat64(char __user * filename, struct stat64 __user * statbuf)
++SYSCALL_DEFINE2(stat64, char __user *, filename, struct stat64 __user *, statbuf)
+ {
+       struct kstat stat;
+       int error = vfs_stat(filename, &stat);
+@@ -375,7 +377,8 @@
+       return error;
+ }
+-asmlinkage long sys_lstat64(char __user * filename, struct stat64 __user * statbuf)
++
++SYSCALL_DEFINE2(lstat64, char __user *, filename, struct stat64 __user *, statbuf)
+ {
+       struct kstat stat;
+       int error = vfs_lstat(filename, &stat);
+@@ -385,7 +388,8 @@
+       return error;
+ }
+-asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user * statbuf)
++
++SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
+ {
+       struct kstat stat;
+       int error = vfs_fstat(fd, &stat);
+@@ -396,8 +400,8 @@
+       return error;
+ }
+-asmlinkage long sys_fstatat64(int dfd, char __user *filename,
+-                             struct stat64 __user *statbuf, int flag)
++SYSCALL_DEFINE4(fstatat64, int, dfd, char __user *, filename,
++              struct stat64 __user *, statbuf, int, flag)
+ {
+       struct kstat stat;
+       int error = -EINVAL;
+--- kernel-maemo-2.6.28.test.orig/fs/super.c
++++ kernel-maemo-2.6.28.test/fs/super.c
+@@ -534,7 +534,7 @@
+       return NULL;
+ }
+-asmlinkage long sys_ustat(unsigned dev, struct ustat __user * ubuf)
++SYSCALL_DEFINE2(ustat, unsigned, dev, struct ustat __user *, ubuf)
+ {
+         struct super_block *s;
+         struct ustat tmp;
+--- kernel-maemo-2.6.28.test.orig/fs/sync.c
++++ kernel-maemo-2.6.28.test/fs/sync.c
+@@ -36,7 +36,7 @@
+               laptop_sync_completion();
+ }
+-asmlinkage long sys_sync(void)
++SYSCALL_DEFINE0(sync)
+ {
+       do_sync(1);
+       return 0;
+@@ -118,12 +118,12 @@
+       return ret;
+ }
+-asmlinkage long sys_fsync(unsigned int fd)
++SYSCALL_DEFINE1(fsync, unsigned int, fd)
+ {
+       return __do_fsync(fd, 0);
+ }
+-asmlinkage long sys_fdatasync(unsigned int fd)
++SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
+ {
+       return __do_fsync(fd, 1);
+ }
+@@ -175,8 +175,8 @@
+  * already-instantiated disk blocks, there are no guarantees here that the data
+  * will be available after a crash.
+  */
+-asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
+-                                      unsigned int flags)
++SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
++                              unsigned int flags)
+ {
+       int ret;
+       struct file *file;
+@@ -236,14 +236,32 @@
+ out:
+       return ret;
+ }
++#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
++asmlinkage long SyS_sync_file_range(long fd, loff_t offset, loff_t nbytes,
++                                  long flags)
++{
++      return SYSC_sync_file_range((int) fd, offset, nbytes,
++                                  (unsigned int) flags);
++}
++SYSCALL_ALIAS(sys_sync_file_range, SyS_sync_file_range);
++#endif
+ /* It would be nice if people remember that not all the world's an i386
+    when they introduce new system calls */
+-asmlinkage long sys_sync_file_range2(int fd, unsigned int flags,
+-                                   loff_t offset, loff_t nbytes)
++SYSCALL_DEFINE(sync_file_range2)(int fd, unsigned int flags,
++                               loff_t offset, loff_t nbytes)
+ {
+       return sys_sync_file_range(fd, offset, nbytes, flags);
+ }
++#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
++asmlinkage long SyS_sync_file_range2(long fd, long flags,
++                                   loff_t offset, loff_t nbytes)
++{
++      return SYSC_sync_file_range2((int) fd, (unsigned int) flags,
++                                   offset, nbytes);
++}
++SYSCALL_ALIAS(sys_sync_file_range2, SyS_sync_file_range2);
++#endif
+ /*
+  * `endbyte' is inclusive
+@@ -269,7 +287,7 @@
+       if (flags & SYNC_FILE_RANGE_WRITE) {
+               ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
+-                                              WB_SYNC_NONE);
++                                              WB_SYNC_ALL);
+               if (ret < 0)
+                       goto out;
+       }
+--- kernel-maemo-2.6.28.test.orig/fs/sysfs/bin.c
++++ kernel-maemo-2.6.28.test/fs/sysfs/bin.c
+@@ -63,6 +63,9 @@
+       int count = min_t(size_t, bytes, PAGE_SIZE);
+       char *temp;
++      if (!bytes)
++              return 0;
++
+       if (size) {
+               if (offs > size)
+                       return 0;
+@@ -131,6 +134,9 @@
+       int count = min_t(size_t, bytes, PAGE_SIZE);
+       char *temp;
++      if (!bytes)
++              return 0;
++
+       if (size) {
+               if (offs > size)
+                       return 0;
+--- kernel-maemo-2.6.28.test.orig/fs/timerfd.c
++++ kernel-maemo-2.6.28.test/fs/timerfd.c
+@@ -177,7 +177,7 @@
+       return file;
+ }
+-asmlinkage long sys_timerfd_create(int clockid, int flags)
++SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
+ {
+       int ufd;
+       struct timerfd_ctx *ctx;
+@@ -186,10 +186,9 @@
+       BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC);
+       BUILD_BUG_ON(TFD_NONBLOCK != O_NONBLOCK);
+-      if (flags & ~(TFD_CLOEXEC | TFD_NONBLOCK))
+-              return -EINVAL;
+-      if (clockid != CLOCK_MONOTONIC &&
+-          clockid != CLOCK_REALTIME)
++      if ((flags & ~TFD_CREATE_FLAGS) ||
++          (clockid != CLOCK_MONOTONIC &&
++           clockid != CLOCK_REALTIME))
+               return -EINVAL;
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+@@ -201,16 +200,16 @@
+       hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS);
+       ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
+-                             flags & (O_CLOEXEC | O_NONBLOCK));
++                             flags & TFD_SHARED_FCNTL_FLAGS);
+       if (ufd < 0)
+               kfree(ctx);
+       return ufd;
+ }
+-asmlinkage long sys_timerfd_settime(int ufd, int flags,
+-                                  const struct itimerspec __user *utmr,
+-                                  struct itimerspec __user *otmr)
++SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
++              const struct itimerspec __user *, utmr,
++              struct itimerspec __user *, otmr)
+ {
+       struct file *file;
+       struct timerfd_ctx *ctx;
+@@ -219,7 +218,8 @@
+       if (copy_from_user(&ktmr, utmr, sizeof(ktmr)))
+               return -EFAULT;
+-      if (!timespec_valid(&ktmr.it_value) ||
++      if ((flags & ~TFD_SETTIME_FLAGS) ||
++          !timespec_valid(&ktmr.it_value) ||
+           !timespec_valid(&ktmr.it_interval))
+               return -EINVAL;
+@@ -265,7 +265,7 @@
+       return 0;
+ }
+-asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr)
++SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
+ {
+       struct file *file;
+       struct timerfd_ctx *ctx;
+--- kernel-maemo-2.6.28.test.orig/fs/ubifs/file.c
++++ kernel-maemo-2.6.28.test/fs/ubifs/file.c
+@@ -219,7 +219,8 @@
+ }
+ static int write_begin_slow(struct address_space *mapping,
+-                          loff_t pos, unsigned len, struct page **pagep)
++                          loff_t pos, unsigned len, struct page **pagep,
++                          unsigned flags)
+ {
+       struct inode *inode = mapping->host;
+       struct ubifs_info *c = inode->i_sb->s_fs_info;
+@@ -247,7 +248,7 @@
+       if (unlikely(err))
+               return err;
+-      page = __grab_cache_page(mapping, index);
++      page = grab_cache_page_write_begin(mapping, index, flags);
+       if (unlikely(!page)) {
+               ubifs_release_budget(c, &req);
+               return -ENOMEM;
+@@ -438,7 +439,7 @@
+               return -EROFS;
+       /* Try out the fast-path part first */
+-      page = __grab_cache_page(mapping, index);
++      page = grab_cache_page_write_begin(mapping, index, flags);
+       if (unlikely(!page))
+               return -ENOMEM;
+@@ -492,7 +493,7 @@
+               unlock_page(page);
+               page_cache_release(page);
+-              return write_begin_slow(mapping, pos, len, pagep);
++              return write_begin_slow(mapping, pos, len, pagep, flags);
+       }
+       /*
+--- kernel-maemo-2.6.28.test.orig/fs/utimes.c
++++ kernel-maemo-2.6.28.test/fs/utimes.c
+@@ -24,7 +24,7 @@
+  * must be owner or have write permission.
+  * Else, update from *times, must be owner or super user.
+  */
+-asmlinkage long sys_utime(char __user *filename, struct utimbuf __user *times)
++SYSCALL_DEFINE2(utime, char __user *, filename, struct utimbuf __user *, times)
+ {
+       struct timespec tv[2];
+@@ -170,7 +170,8 @@
+       return error;
+ }
+-asmlinkage long sys_utimensat(int dfd, char __user *filename, struct timespec __user *utimes, int flags)
++SYSCALL_DEFINE4(utimensat, int, dfd, char __user *, filename,
++              struct timespec __user *, utimes, int, flags)
+ {
+       struct timespec tstimes[2];
+@@ -187,7 +188,8 @@
+       return do_utimes(dfd, filename, utimes ? tstimes : NULL, flags);
+ }
+-asmlinkage long sys_futimesat(int dfd, char __user *filename, struct timeval __user *utimes)
++SYSCALL_DEFINE3(futimesat, int, dfd, char __user *, filename,
++              struct timeval __user *, utimes)
+ {
+       struct timeval times[2];
+       struct timespec tstimes[2];
+@@ -214,7 +216,8 @@
+       return do_utimes(dfd, filename, utimes ? tstimes : NULL, 0);
+ }
+-asmlinkage long sys_utimes(char __user *filename, struct timeval __user *utimes)
++SYSCALL_DEFINE2(utimes, char __user *, filename,
++              struct timeval __user *, utimes)
+ {
+       return sys_futimesat(AT_FDCWD, filename, utimes);
+ }
+--- kernel-maemo-2.6.28.test.orig/fs/xattr.c
++++ kernel-maemo-2.6.28.test/fs/xattr.c
+@@ -251,9 +251,9 @@
+       return error;
+ }
+-asmlinkage long
+-sys_setxattr(const char __user *pathname, const char __user *name,
+-           const void __user *value, size_t size, int flags)
++SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
++              const char __user *, name, const void __user *, value,
++              size_t, size, int, flags)
+ {
+       struct path path;
+       int error;
+@@ -270,9 +270,9 @@
+       return error;
+ }
+-asmlinkage long
+-sys_lsetxattr(const char __user *pathname, const char __user *name,
+-            const void __user *value, size_t size, int flags)
++SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
++              const char __user *, name, const void __user *, value,
++              size_t, size, int, flags)
+ {
+       struct path path;
+       int error;
+@@ -289,9 +289,8 @@
+       return error;
+ }
+-asmlinkage long
+-sys_fsetxattr(int fd, const char __user *name, const void __user *value,
+-            size_t size, int flags)
++SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
++              const void __user *,value, size_t, size, int, flags)
+ {
+       struct file *f;
+       struct dentry *dentry;
+@@ -349,9 +348,8 @@
+       return error;
+ }
+-asmlinkage ssize_t
+-sys_getxattr(const char __user *pathname, const char __user *name,
+-           void __user *value, size_t size)
++SYSCALL_DEFINE4(getxattr, const char __user *, pathname,
++              const char __user *, name, void __user *, value, size_t, size)
+ {
+       struct path path;
+       ssize_t error;
+@@ -364,9 +362,8 @@
+       return error;
+ }
+-asmlinkage ssize_t
+-sys_lgetxattr(const char __user *pathname, const char __user *name, void __user *value,
+-            size_t size)
++SYSCALL_DEFINE4(lgetxattr, const char __user *, pathname,
++              const char __user *, name, void __user *, value, size_t, size)
+ {
+       struct path path;
+       ssize_t error;
+@@ -379,8 +376,8 @@
+       return error;
+ }
+-asmlinkage ssize_t
+-sys_fgetxattr(int fd, const char __user *name, void __user *value, size_t size)
++SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name,
++              void __user *, value, size_t, size)
+ {
+       struct file *f;
+       ssize_t error = -EBADF;
+@@ -424,8 +421,8 @@
+       return error;
+ }
+-asmlinkage ssize_t
+-sys_listxattr(const char __user *pathname, char __user *list, size_t size)
++SYSCALL_DEFINE3(listxattr, const char __user *, pathname, char __user *, list,
++              size_t, size)
+ {
+       struct path path;
+       ssize_t error;
+@@ -438,8 +435,8 @@
+       return error;
+ }
+-asmlinkage ssize_t
+-sys_llistxattr(const char __user *pathname, char __user *list, size_t size)
++SYSCALL_DEFINE3(llistxattr, const char __user *, pathname, char __user *, list,
++              size_t, size)
+ {
+       struct path path;
+       ssize_t error;
+@@ -452,8 +449,7 @@
+       return error;
+ }
+-asmlinkage ssize_t
+-sys_flistxattr(int fd, char __user *list, size_t size)
++SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
+ {
+       struct file *f;
+       ssize_t error = -EBADF;
+@@ -485,8 +481,8 @@
+       return vfs_removexattr(d, kname);
+ }
+-asmlinkage long
+-sys_removexattr(const char __user *pathname, const char __user *name)
++SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
++              const char __user *, name)
+ {
+       struct path path;
+       int error;
+@@ -503,8 +499,8 @@
+       return error;
+ }
+-asmlinkage long
+-sys_lremovexattr(const char __user *pathname, const char __user *name)
++SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
++              const char __user *, name)
+ {
+       struct path path;
+       int error;
+@@ -521,8 +517,7 @@
+       return error;
+ }
+-asmlinkage long
+-sys_fremovexattr(int fd, const char __user *name)
++SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
+ {
+       struct file *f;
+       struct dentry *dentry;
+--- kernel-maemo-2.6.28.test.orig/fs/xfs/linux-2.6/xfs_buf.c
++++ kernel-maemo-2.6.28.test/fs/xfs/linux-2.6/xfs_buf.c
+@@ -1114,8 +1114,7 @@
+       unsigned int            blocksize = bp->b_target->bt_bsize;
+       struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+-      if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+-              bp->b_error = EIO;
++      xfs_buf_ioerror(bp, -error);
+       do {
+               struct page     *page = bvec->bv_page;
+--- kernel-maemo-2.6.28.test.orig/fs/xfs/xfs_dir2_block.c
++++ kernel-maemo-2.6.28.test/fs/xfs/xfs_dir2_block.c
+@@ -517,9 +517,9 @@
+               /*
+                * If it didn't fit, set the final offset to here & return.
+                */
+-              if (filldir(dirent, dep->name, dep->namelen, cook,
++              if (filldir(dirent, dep->name, dep->namelen, cook & 0x7fffffff,
+                           ino, DT_UNKNOWN)) {
+-                      *offset = cook;
++                      *offset = cook & 0x7fffffff;
+                       xfs_da_brelse(NULL, bp);
+                       return 0;
+               }
+@@ -529,7 +529,8 @@
+        * Reached the end of the block.
+        * Set the offset to a non-existent block 1 and return.
+        */
+-      *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0);
++      *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
++                      0x7fffffff;
+       xfs_da_brelse(NULL, bp);
+       return 0;
+ }
+--- kernel-maemo-2.6.28.test.orig/fs/xfs/xfs_dir2_leaf.c
++++ kernel-maemo-2.6.28.test/fs/xfs/xfs_dir2_leaf.c
+@@ -1092,7 +1092,7 @@
+                * Won't fit.  Return to caller.
+                */
+               if (filldir(dirent, dep->name, dep->namelen,
+-                          xfs_dir2_byte_to_dataptr(mp, curoff),
++                          xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff,
+                           ino, DT_UNKNOWN))
+                       break;
+@@ -1108,9 +1108,9 @@
+        * All done.  Set output offset value to current offset.
+        */
+       if (curoff > xfs_dir2_dataptr_to_byte(mp, XFS_DIR2_MAX_DATAPTR))
+-              *offset = XFS_DIR2_MAX_DATAPTR;
++              *offset = XFS_DIR2_MAX_DATAPTR & 0x7fffffff;
+       else
+-              *offset = xfs_dir2_byte_to_dataptr(mp, curoff);
++              *offset = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
+       kmem_free(map);
+       if (bp)
+               xfs_da_brelse(NULL, bp);
+--- kernel-maemo-2.6.28.test.orig/fs/xfs/xfs_dir2_sf.c
++++ kernel-maemo-2.6.28.test/fs/xfs/xfs_dir2_sf.c
+@@ -752,8 +752,8 @@
+ #if XFS_BIG_INUMS
+               ino += mp->m_inoadd;
+ #endif
+-              if (filldir(dirent, ".", 1, dot_offset, ino, DT_DIR)) {
+-                      *offset = dot_offset;
++              if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, ino, DT_DIR)) {
++                      *offset = dot_offset & 0x7fffffff;
+                       return 0;
+               }
+       }
+@@ -766,8 +766,8 @@
+ #if XFS_BIG_INUMS
+               ino += mp->m_inoadd;
+ #endif
+-              if (filldir(dirent, "..", 2, dotdot_offset, ino, DT_DIR)) {
+-                      *offset = dotdot_offset;
++              if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) {
++                      *offset = dotdot_offset & 0x7fffffff;
+                       return 0;
+               }
+       }
+@@ -791,14 +791,15 @@
+ #endif
+               if (filldir(dirent, sfep->name, sfep->namelen,
+-                                          off, ino, DT_UNKNOWN)) {
+-                      *offset = off;
++                          off & 0x7fffffff, ino, DT_UNKNOWN)) {
++                      *offset = off & 0x7fffffff;
+                       return 0;
+               }
+               sfep = xfs_dir2_sf_nextentry(sfp, sfep);
+       }
+-      *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0);
++      *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
++                      0x7fffffff;
+       return 0;
+ }
+--- kernel-maemo-2.6.28.test.orig/include/acpi/pdc_intel.h
++++ kernel-maemo-2.6.28.test/include/acpi/pdc_intel.h
+@@ -14,6 +14,7 @@
+ #define ACPI_PDC_SMP_T_SWCOORD                (0x0080)
+ #define ACPI_PDC_C_C1_FFH             (0x0100)
+ #define ACPI_PDC_C_C2C3_FFH           (0x0200)
++#define ACPI_PDC_SMP_P_HWCOORD                (0x0800)
+ #define ACPI_PDC_EST_CAPABILITY_SMP   (ACPI_PDC_SMP_C1PT | \
+                                        ACPI_PDC_C_C1_HALT | \
+@@ -22,6 +23,7 @@
+ #define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \
+                                        ACPI_PDC_C_C1_HALT | \
+                                        ACPI_PDC_SMP_P_SWCOORD | \
++                                       ACPI_PDC_SMP_P_HWCOORD | \
+                                        ACPI_PDC_P_FFH)
+ #define ACPI_PDC_C_CAPABILITY_SMP     (ACPI_PDC_SMP_C2C3  | \
+--- kernel-maemo-2.6.28.test.orig/include/drm/i915_drm.h
++++ kernel-maemo-2.6.28.test/include/drm/i915_drm.h
+@@ -177,6 +177,8 @@
+ #define DRM_IOCTL_I915_SET_VBLANK_PIPE        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+ #define DRM_IOCTL_I915_GET_VBLANK_PIPE        DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+ #define DRM_IOCTL_I915_VBLANK_SWAP    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
++#define DRM_IOCTL_I915_GEM_INIT               DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
++#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+ #define DRM_IOCTL_I915_GEM_PIN                DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+ #define DRM_IOCTL_I915_GEM_UNPIN      DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+ #define DRM_IOCTL_I915_GEM_BUSY               DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+--- kernel-maemo-2.6.28.test.orig/include/linux/Kbuild
++++ kernel-maemo-2.6.28.test/include/linux/Kbuild
+@@ -41,6 +41,7 @@
+ header-y += bfs_fs.h
+ header-y += blkpg.h
+ header-y += bpqether.h
++header-y += bsg.h
+ header-y += can.h
+ header-y += cdk.h
+ header-y += chio.h
+@@ -91,7 +92,6 @@
+ header-y += if_slip.h
+ header-y += if_strip.h
+ header-y += if_tun.h
+-header-y += if_tunnel.h
+ header-y += in_route.h
+ header-y += ioctl.h
+ header-y += ip6_tunnel.h
+@@ -240,6 +240,7 @@
+ unifdef-y += if_pppol2tp.h
+ unifdef-y += if_pppox.h
+ unifdef-y += if_tr.h
++unifdef-y += if_tunnel.h
+ unifdef-y += if_vlan.h
+ unifdef-y += igmp.h
+ unifdef-y += inet_diag.h
+--- kernel-maemo-2.6.28.test.orig/include/linux/ata.h
++++ kernel-maemo-2.6.28.test/include/linux/ata.h
+@@ -731,12 +731,17 @@
+ static inline int ata_id_is_cfa(const u16 *id)
+ {
+-      if (id[ATA_ID_CONFIG] == 0x848A)        /* Standard CF */
++      if (id[ATA_ID_CONFIG] == 0x848A)        /* Traditional CF */
+               return 1;
+-      /* Could be CF hiding as standard ATA */
+-      if (ata_id_major_version(id) >= 3 &&
+-          id[ATA_ID_COMMAND_SET_1] != 0xFFFF &&
+-         (id[ATA_ID_COMMAND_SET_1] & (1 << 2)))
++      /*
++       * CF specs don't require specific value in the word 0 anymore and yet
++       * they forbid to report the ATA version in the word 80 and require the
++       * CFA feature set support to be indicated in the word 83 in this case.
++       * Unfortunately, some cards only follow either of this requirements,
++       * and while those that don't indicate CFA feature support need some
++       * sort of quirk list, it seems impractical for the ones that do...
++       */
++      if ((id[ATA_ID_COMMAND_SET_2] & 0xC004) == 0x4004)
+               return 1;
+       return 0;
+ }
+--- kernel-maemo-2.6.28.test.orig/include/linux/capability.h
++++ kernel-maemo-2.6.28.test/include/linux/capability.h
+@@ -366,7 +366,21 @@
+ #define CAP_FOR_EACH_U32(__capi)  \
+       for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi)
++/*
++ * CAP_FS_MASK and CAP_NFSD_MASKS:
++ *
++ * The fs mask is all the privileges that fsuid==0 historically meant.
++ * At one time in the past, that included CAP_MKNOD and CAP_LINUX_IMMUTABLE.
++ *
++ * It has never meant setting security.* and trusted.* xattrs.
++ *
++ * We could also define fsmask as follows:
++ *   1. CAP_FS_MASK is the privilege to bypass all fs-related DAC permissions
++ *   2. The security.* and trusted.* xattrs are fs-related MAC permissions
++ */
++
+ # define CAP_FS_MASK_B0     (CAP_TO_MASK(CAP_CHOWN)           \
++                          | CAP_TO_MASK(CAP_MKNOD)            \
+                           | CAP_TO_MASK(CAP_DAC_OVERRIDE)     \
+                           | CAP_TO_MASK(CAP_DAC_READ_SEARCH)  \
+                           | CAP_TO_MASK(CAP_FOWNER)           \
+@@ -381,9 +395,12 @@
+ # define CAP_EMPTY_SET    ((kernel_cap_t){{ 0, 0 }})
+ # define CAP_FULL_SET     ((kernel_cap_t){{ ~0, ~0 }})
+ # define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }})
+-# define CAP_FS_SET       ((kernel_cap_t){{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } })
+-# define CAP_NFSD_SET     ((kernel_cap_t){{ CAP_FS_MASK_B0|CAP_TO_MASK(CAP_SYS_RESOURCE), \
+-                                      CAP_FS_MASK_B1 } })
++# define CAP_FS_SET       ((kernel_cap_t){{ CAP_FS_MASK_B0 \
++                                  | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
++                                  CAP_FS_MASK_B1 } })
++# define CAP_NFSD_SET     ((kernel_cap_t){{ CAP_FS_MASK_B0 \
++                                  | CAP_TO_MASK(CAP_SYS_RESOURCE), \
++                                  CAP_FS_MASK_B1 } })
+ #endif /* _KERNEL_CAPABILITY_U32S != 2 */
+--- kernel-maemo-2.6.28.test.orig/include/linux/compat.h
++++ kernel-maemo-2.6.28.test/include/linux/compat.h
+@@ -280,5 +280,18 @@
+ asmlinkage long compat_sys_timerfd_gettime(int ufd,
+                                  struct compat_itimerspec __user *otmr);
++asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_page,
++                                    __u32 __user *pages,
++                                    const int __user *nodes,
++                                    int __user *status,
++                                    int flags);
++asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename,
++                                   struct compat_timeval __user *t);
++asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
++                                    struct compat_stat __user *statbuf,
++                                    int flag);
++asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
++                                int flags, int mode);
++
+ #endif /* CONFIG_COMPAT */
+ #endif /* _LINUX_COMPAT_H */
+--- kernel-maemo-2.6.28.test.orig/include/linux/fs.h
++++ kernel-maemo-2.6.28.test/include/linux/fs.h
+@@ -63,24 +63,30 @@
+ #define MAY_ACCESS 16
+ #define MAY_OPEN 32
++/*
++ * flags in file.f_mode.  Note that FMODE_READ and FMODE_WRITE must correspond
++ * to O_WRONLY and O_RDWR via the strange trick in __dentry_open()
++ */
++
+ /* file is open for reading */
+ #define FMODE_READ            ((__force fmode_t)1)
+ /* file is open for writing */
+ #define FMODE_WRITE           ((__force fmode_t)2)
+ /* file is seekable */
+ #define FMODE_LSEEK           ((__force fmode_t)4)
+-/* file can be accessed using pread/pwrite */
++/* file can be accessed using pread */
+ #define FMODE_PREAD           ((__force fmode_t)8)
+-#define FMODE_PWRITE          FMODE_PREAD     /* These go hand in hand */
++/* file can be accessed using pwrite */
++#define FMODE_PWRITE          ((__force fmode_t)16)
+ /* File is opened for execution with sys_execve / sys_uselib */
+-#define FMODE_EXEC            ((__force fmode_t)16)
++#define FMODE_EXEC            ((__force fmode_t)32)
+ /* File is opened with O_NDELAY (only set for block devices) */
+-#define FMODE_NDELAY          ((__force fmode_t)32)
++#define FMODE_NDELAY          ((__force fmode_t)64)
+ /* File is opened with O_EXCL (only set for block devices) */
+-#define FMODE_EXCL            ((__force fmode_t)64)
++#define FMODE_EXCL            ((__force fmode_t)128)
+ /* File is opened using open(.., 3, ..) and is writeable only for ioctls
+    (specialy hack for floppy.c) */
+-#define FMODE_WRITE_IOCTL     ((__force fmode_t)128)
++#define FMODE_WRITE_IOCTL     ((__force fmode_t)256)
+ #define RW_MASK               1
+ #define RWA_MASK      2
+@@ -414,6 +420,9 @@
+ #define AOP_FLAG_UNINTERRUPTIBLE      0x0001 /* will not do a short write */
+ #define AOP_FLAG_CONT_EXPAND          0x0002 /* called from cont_expand */
++#define AOP_FLAG_NOFS                 0x0004 /* used by filesystem to direct
++                                              * helper code (eg buffer layer)
++                                              * to clear GFP_FS from alloc */
+ /*
+  * oh the beauties of C type declarations.
+@@ -1121,7 +1130,6 @@
+       struct rw_semaphore     s_umount;
+       struct mutex            s_lock;
+       int                     s_count;
+-      int                     s_syncing;
+       int                     s_need_sync_fs;
+       atomic_t                s_active;
+ #ifdef CONFIG_SECURITY
+@@ -2036,7 +2044,7 @@
+ extern void *page_follow_link_light(struct dentry *, struct nameidata *);
+ extern void page_put_link(struct dentry *, struct nameidata *, void *);
+ extern int __page_symlink(struct inode *inode, const char *symname, int len,
+-              gfp_t gfp_mask);
++              int nofs);
+ extern int page_symlink(struct inode *inode, const char *symname, int len);
+ extern const struct inode_operations page_symlink_inode_operations;
+ extern int generic_readlink(struct dentry *, char __user *, int);
+--- kernel-maemo-2.6.28.test.orig/include/linux/genhd.h
++++ kernel-maemo-2.6.28.test/include/linux/genhd.h
+@@ -213,6 +213,7 @@
+ #define DISK_PITER_REVERSE    (1 << 0) /* iterate in the reverse direction */
+ #define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
+ #define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
++#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */
+ struct disk_part_iter {
+       struct gendisk          *disk;
+--- kernel-maemo-2.6.28.test.orig/include/linux/if_tunnel.h
++++ kernel-maemo-2.6.28.test/include/linux/if_tunnel.h
+@@ -2,7 +2,10 @@
+ #define _IF_TUNNEL_H_
+ #include <linux/types.h>
++
++#ifdef __KERNEL__
+ #include <linux/ip.h>
++#endif
+ #define SIOCGETTUNNEL   (SIOCDEVPRIVATE + 0)
+ #define SIOCADDTUNNEL   (SIOCDEVPRIVATE + 1)
+--- kernel-maemo-2.6.28.test.orig/include/linux/jbd2.h
++++ kernel-maemo-2.6.28.test/include/linux/jbd2.h
+@@ -308,7 +308,8 @@
+               int val = (expr);                                            \
+               if (!val) {                                                  \
+                       printk(KERN_ERR                                      \
+-                              "EXT3-fs unexpected failure: %s;\n",# expr); \
++                             "JBD2 unexpected failure: %s: %s;\n",         \
++                             __func__, #expr);                             \
+                       printk(KERN_ERR why "\n");                           \
+               }                                                            \
+               val;                                                         \
+@@ -329,6 +330,7 @@
+       BH_State,               /* Pins most journal_head state */
+       BH_JournalHead,         /* Pins bh->b_private and jh->b_bh */
+       BH_Unshadow,            /* Dummy bit, for BJ_Shadow wakeup filtering */
++      BH_JBDPrivateStart,     /* First bit available for private use by FS */
+ };
+ BUFFER_FNS(JBD, jbd)
+@@ -1085,7 +1087,8 @@
+ extern int       jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
+ extern int       jbd2_journal_force_commit(journal_t *);
+ extern int       jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode);
+-extern int       jbd2_journal_begin_ordered_truncate(struct jbd2_inode *inode, loff_t new_size);
++extern int       jbd2_journal_begin_ordered_truncate(journal_t *journal,
++                              struct jbd2_inode *inode, loff_t new_size);
+ extern void      jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
+ extern void      jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_inode *jinode);
+--- kernel-maemo-2.6.28.test.orig/include/linux/klist.h
++++ kernel-maemo-2.6.28.test/include/linux/klist.h
+@@ -23,7 +23,7 @@
+       struct list_head        k_list;
+       void                    (*get)(struct klist_node *);
+       void                    (*put)(struct klist_node *);
+-};
++} __attribute__ ((aligned (4)));
+ #define KLIST_INIT(_name, _get, _put)                                 \
+       { .k_lock       = __SPIN_LOCK_UNLOCKED(_name.k_lock),           \
+--- kernel-maemo-2.6.28.test.orig/include/linux/kvm.h
++++ kernel-maemo-2.6.28.test/include/linux/kvm.h
+@@ -387,6 +387,8 @@
+ #define KVM_CAP_DEVICE_ASSIGNMENT 17
+ #endif
+ #define KVM_CAP_IOMMU 18
++/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
++#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
+ /*
+  * ioctls for VM fds
+--- kernel-maemo-2.6.28.test.orig/include/linux/mm.h
++++ kernel-maemo-2.6.28.test/include/linux/mm.h
+@@ -253,7 +253,6 @@
+  */
+ static inline int get_page_unless_zero(struct page *page)
+ {
+-      VM_BUG_ON(PageTail(page));
+       return atomic_inc_not_zero(&page->_count);
+ }
+@@ -1028,10 +1027,23 @@
+ typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
+ extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
+ extern void sparse_memory_present_with_active_regions(int nid);
+-#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+-extern int early_pfn_to_nid(unsigned long pfn);
+-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+ #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
++
++#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
++    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
++static inline int __early_pfn_to_nid(unsigned long pfn)
++{
++      return 0;
++}
++#else
++/* please see mm/page_alloc.c */
++extern int __meminit early_pfn_to_nid(unsigned long pfn);
++#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
++/* there is a per-arch backend function. */
++extern int __meminit __early_pfn_to_nid(unsigned long pfn);
++#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
++#endif
++
+ extern void set_dma_reserve(unsigned long new_dma_reserve);
+ extern void memmap_init_zone(unsigned long, int, unsigned long,
+                               unsigned long, enum memmap_context);
+--- kernel-maemo-2.6.28.test.orig/include/linux/mmzone.h
++++ kernel-maemo-2.6.28.test/include/linux/mmzone.h
+@@ -1067,7 +1067,7 @@
+ #endif /* CONFIG_SPARSEMEM */
+ #ifdef CONFIG_NODES_SPAN_OTHER_NODES
+-#define early_pfn_in_nid(pfn, nid)    (early_pfn_to_nid(pfn) == (nid))
++bool early_pfn_in_nid(unsigned long pfn, int nid);
+ #else
+ #define early_pfn_in_nid(pfn, nid)    (1)
+ #endif
+--- kernel-maemo-2.6.28.test.orig/include/linux/mod_devicetable.h
++++ kernel-maemo-2.6.28.test/include/linux/mod_devicetable.h
+@@ -443,6 +443,13 @@
+       struct dmi_strmatch matches[4];
+       void *driver_data;
+ };
++/*
++ * struct dmi_device_id appears during expansion of
++ * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
++ * but this is enough for gcc 3.4.6 to error out:
++ *    error: storage size of '__mod_dmi_device_table' isn't known
++ */
++#define dmi_device_id dmi_system_id
+ #endif
+ #define DMI_MATCH(a, b)       { a, b }
+--- kernel-maemo-2.6.28.test.orig/include/linux/module.h
++++ kernel-maemo-2.6.28.test/include/linux/module.h
+@@ -391,7 +391,6 @@
+ static inline void __module_get(struct module *module)
+ {
+       if (module) {
+-              BUG_ON(module_refcount(module) == 0);
+               local_inc(&module->ref[get_cpu()].count);
+               put_cpu();
+       }
+--- kernel-maemo-2.6.28.test.orig/include/linux/pagemap.h
++++ kernel-maemo-2.6.28.test/include/linux/pagemap.h
+@@ -18,9 +18,14 @@
+  * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
+  * allocation mode flags.
+  */
+-#define       AS_EIO          (__GFP_BITS_SHIFT + 0)  /* IO error on async write */
+-#define AS_ENOSPC     (__GFP_BITS_SHIFT + 1)  /* ENOSPC on async write */
+-#define AS_MM_ALL_LOCKS       (__GFP_BITS_SHIFT + 2)  /* under mm_take_all_locks() */
++enum mapping_flags {
++      AS_EIO          = __GFP_BITS_SHIFT + 0, /* IO error on async write */
++      AS_ENOSPC       = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
++      AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
++#ifdef CONFIG_UNEVICTABLE_LRU
++      AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
++#endif
++};
+ static inline void mapping_set_error(struct address_space *mapping, int error)
+ {
+@@ -33,7 +38,6 @@
+ }
+ #ifdef CONFIG_UNEVICTABLE_LRU
+-#define AS_UNEVICTABLE        (__GFP_BITS_SHIFT + 2)  /* e.g., ramdisk, SHM_LOCK */
+ static inline void mapping_set_unevictable(struct address_space *mapping)
+ {
+@@ -241,7 +245,8 @@
+ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
+                       int tag, unsigned int nr_pages, struct page **pages);
+-struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index);
++struct page *grab_cache_page_write_begin(struct address_space *mapping,
++                      pgoff_t index, unsigned flags);
+ /*
+  * Returns locked page at given index in given cache, creating it if needed.
+--- kernel-maemo-2.6.28.test.orig/include/linux/pci.h
++++ kernel-maemo-2.6.28.test/include/linux/pci.h
+@@ -651,7 +651,7 @@
+ void pci_disable_rom(struct pci_dev *pdev);
+ void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
+ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
+-size_t pci_get_rom_size(void __iomem *rom, size_t size);
++size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
+ /* Power management related routines */
+ int pci_save_state(struct pci_dev *dev);
+--- kernel-maemo-2.6.28.test.orig/include/linux/pci_ids.h
++++ kernel-maemo-2.6.28.test/include/linux/pci_ids.h
+@@ -1312,6 +1312,7 @@
+ #define PCI_DEVICE_ID_VIA_VT3351      0x0351
+ #define PCI_DEVICE_ID_VIA_VT3364      0x0364
+ #define PCI_DEVICE_ID_VIA_8371_0      0x0391
++#define PCI_DEVICE_ID_VIA_6415                0x0415
+ #define PCI_DEVICE_ID_VIA_8501_0      0x0501
+ #define PCI_DEVICE_ID_VIA_82C561      0x0561
+ #define PCI_DEVICE_ID_VIA_82C586_1    0x0571
+@@ -1357,6 +1358,7 @@
+ #define PCI_DEVICE_ID_VIA_8783_0      0x3208
+ #define PCI_DEVICE_ID_VIA_8237                0x3227
+ #define PCI_DEVICE_ID_VIA_8251                0x3287
++#define PCI_DEVICE_ID_VIA_8261                0x3402
+ #define PCI_DEVICE_ID_VIA_8237A               0x3337
+ #define PCI_DEVICE_ID_VIA_8237S               0x3372
+ #define PCI_DEVICE_ID_VIA_SATA_EIDE   0x5324
+@@ -1366,10 +1368,13 @@
+ #define PCI_DEVICE_ID_VIA_CX700               0x8324
+ #define PCI_DEVICE_ID_VIA_CX700_IDE   0x0581
+ #define PCI_DEVICE_ID_VIA_VX800               0x8353
++#define PCI_DEVICE_ID_VIA_VX855               0x8409
+ #define PCI_DEVICE_ID_VIA_8371_1      0x8391
+ #define PCI_DEVICE_ID_VIA_82C598_1    0x8598
+ #define PCI_DEVICE_ID_VIA_838X_1      0xB188
+ #define PCI_DEVICE_ID_VIA_83_87XX_1   0xB198
++#define PCI_DEVICE_ID_VIA_C409_IDE    0XC409
++#define PCI_DEVICE_ID_VIA_ANON                0xFFFF
+ #define PCI_VENDOR_ID_SIEMENS           0x110A
+ #define PCI_DEVICE_ID_SIEMENS_DSCC4     0x2102
+@@ -1440,6 +1445,7 @@
+ #define PCI_DEVICE_ID_DIGI_DF_M_E     0x0071
+ #define PCI_DEVICE_ID_DIGI_DF_M_IOM2_A        0x0072
+ #define PCI_DEVICE_ID_DIGI_DF_M_A     0x0073
++#define PCI_DEVICE_ID_DIGI_NEO_8      0x00B1
+ #define PCI_DEVICE_ID_NEO_2DB9          0x00C8
+ #define PCI_DEVICE_ID_NEO_2DB9PRI       0x00C9
+ #define PCI_DEVICE_ID_NEO_2RJ45         0x00CA
+@@ -1766,6 +1772,7 @@
+ #define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081
+ #define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082
+ #define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL  0x2050
++#define PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL     0x2530
+ #define PCI_VENDOR_ID_RADISYS         0x1331
+@@ -1795,6 +1802,7 @@
+ #define PCI_DEVICE_ID_SEALEVEL_UCOMM232       0x7202
+ #define PCI_DEVICE_ID_SEALEVEL_COMM4  0x7401
+ #define PCI_DEVICE_ID_SEALEVEL_COMM8  0x7801
++#define PCI_DEVICE_ID_SEALEVEL_7803   0x7803
+ #define PCI_DEVICE_ID_SEALEVEL_UCOMM8 0x7804
+ #define PCI_VENDOR_ID_HYPERCOPE               0x1365
+@@ -2171,6 +2179,7 @@
+ #define PCI_DEVICE_ID_RDC_R6040               0x6040
+ #define PCI_DEVICE_ID_RDC_R6060               0x6060
+ #define PCI_DEVICE_ID_RDC_R6061               0x6061
++#define PCI_DEVICE_ID_RDC_D1010               0x1010
+ #define PCI_VENDOR_ID_LENOVO          0x17aa
+@@ -2309,6 +2318,9 @@
+ #define PCI_DEVICE_ID_INTEL_82378     0x0484
+ #define PCI_DEVICE_ID_INTEL_I960      0x0960
+ #define PCI_DEVICE_ID_INTEL_I960RM    0x0962
++#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
++#define PCI_DEVICE_ID_INTEL_82573E_SOL        0x1085
++#define PCI_DEVICE_ID_INTEL_82573L_SOL        0x108F
+ #define PCI_DEVICE_ID_INTEL_82815_MC  0x1130
+ #define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
+ #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
+@@ -2412,6 +2424,7 @@
+ #define PCI_DEVICE_ID_INTEL_ICH7_0    0x27b8
+ #define PCI_DEVICE_ID_INTEL_ICH7_1    0x27b9
+ #define PCI_DEVICE_ID_INTEL_ICH7_30   0x27b0
++#define PCI_DEVICE_ID_INTEL_TGP_LPC   0x27bc
+ #define PCI_DEVICE_ID_INTEL_ICH7_31   0x27bd
+ #define PCI_DEVICE_ID_INTEL_ICH7_17   0x27da
+ #define PCI_DEVICE_ID_INTEL_ICH7_19   0x27dd
+--- kernel-maemo-2.6.28.test.orig/include/linux/pci_regs.h
++++ kernel-maemo-2.6.28.test/include/linux/pci_regs.h
+@@ -234,7 +234,7 @@
+ #define  PCI_PM_CAP_PME_SHIFT 11      /* Start of the PME Mask in PMC */
+ #define PCI_PM_CTRL           4       /* PM control and status register */
+ #define  PCI_PM_CTRL_STATE_MASK       0x0003  /* Current power state (D0 to D3) */
+-#define  PCI_PM_CTRL_NO_SOFT_RESET    0x0004  /* No reset for D3hot->D0 */
++#define  PCI_PM_CTRL_NO_SOFT_RESET    0x0008  /* No reset for D3hot->D0 */
+ #define  PCI_PM_CTRL_PME_ENABLE       0x0100  /* PME pin enable */
+ #define  PCI_PM_CTRL_DATA_SEL_MASK    0x1e00  /* Data select (??) */
+ #define  PCI_PM_CTRL_DATA_SCALE_MASK  0x6000  /* Data scale (??) */
+--- kernel-maemo-2.6.28.test.orig/include/linux/pid.h
++++ kernel-maemo-2.6.28.test/include/linux/pid.h
+@@ -123,6 +123,24 @@
+ extern void free_pid(struct pid *pid);
+ /*
++ * ns_of_pid() returns the pid namespace in which the specified pid was
++ * allocated.
++ *
++ * NOTE:
++ *    ns_of_pid() is expected to be called for a process (task) that has
++ *    an attached 'struct pid' (see attach_pid(), detach_pid()) i.e @pid
++ *    is expected to be non-NULL. If @pid is NULL, caller should handle
++ *    the resulting NULL pid-ns.
++ */
++static inline struct pid_namespace *ns_of_pid(struct pid *pid)
++{
++      struct pid_namespace *ns = NULL;
++      if (pid)
++              ns = pid->numbers[pid->level].ns;
++      return ns;
++}
++
++/*
+  * the helpers to get the pid's id seen from different namespaces
+  *
+  * pid_nr()    : global id, i.e. the id seen from the init namespace;
+--- kernel-maemo-2.6.28.test.orig/include/linux/radix-tree.h
++++ kernel-maemo-2.6.28.test/include/linux/radix-tree.h
+@@ -136,7 +136,7 @@
+  */
+ static inline void *radix_tree_deref_slot(void **pslot)
+ {
+-      void *ret = *pslot;
++      void *ret = rcu_dereference(*pslot);
+       if (unlikely(radix_tree_is_indirect_ptr(ret)))
+               ret = RADIX_TREE_RETRY;
+       return ret;
+--- kernel-maemo-2.6.28.test.orig/include/linux/raid/md_k.h
++++ kernel-maemo-2.6.28.test/include/linux/raid/md_k.h
+@@ -245,6 +245,8 @@
+                                                        * file in sysfs.
+                                                        */
++      struct work_struct del_work;    /* used for delayed sysfs removal */
++
+       spinlock_t                      write_lock;
+       wait_queue_head_t               sb_wait;        /* for waiting on superblock updates */
+       atomic_t                        pending_writes; /* number of active superblock writes */
+--- kernel-maemo-2.6.28.test.orig/include/linux/sched.h
++++ kernel-maemo-2.6.28.test/include/linux/sched.h
+@@ -201,7 +201,8 @@
+ #define task_is_stopped_or_traced(task)       \
+                       ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+ #define task_contributes_to_load(task)        \
+-                              ((task->state & TASK_UNINTERRUPTIBLE) != 0)
++                              ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
++                               (task->flags & PF_FROZEN) == 0)
+ #define __set_task_state(tsk, state_value)            \
+       do { (tsk)->state = (state_value); } while (0)
+--- kernel-maemo-2.6.28.test.orig/include/linux/seq_file.h
++++ kernel-maemo-2.6.28.test/include/linux/seq_file.h
+@@ -19,6 +19,7 @@
+       size_t from;
+       size_t count;
+       loff_t index;
++      loff_t read_pos;
+       u64 version;
+       struct mutex lock;
+       const struct seq_operations *op;
+--- kernel-maemo-2.6.28.test.orig/include/linux/serial_core.h
++++ kernel-maemo-2.6.28.test/include/linux/serial_core.h
+@@ -288,6 +288,7 @@
+ #define UPF_HARDPPS_CD                ((__force upf_t) (1 << 11))
+ #define UPF_LOW_LATENCY               ((__force upf_t) (1 << 13))
+ #define UPF_BUGGY_UART                ((__force upf_t) (1 << 14))
++#define UPF_NO_TXEN_TEST      ((__force upf_t) (1 << 15))
+ #define UPF_MAGIC_MULTIPLIER  ((__force upf_t) (1 << 16))
+ #define UPF_CONS_FLOW         ((__force upf_t) (1 << 23))
+ #define UPF_SHARE_IRQ         ((__force upf_t) (1 << 24))
+--- kernel-maemo-2.6.28.test.orig/include/linux/skbuff.h
++++ kernel-maemo-2.6.28.test/include/linux/skbuff.h
+@@ -411,15 +411,6 @@
+                                    void *here);
+ extern void         skb_under_panic(struct sk_buff *skb, int len,
+                                     void *here);
+-extern void         skb_truesize_bug(struct sk_buff *skb);
+-
+-static inline void skb_truesize_check(struct sk_buff *skb)
+-{
+-      int len = sizeof(struct sk_buff) + skb->len;
+-
+-      if (unlikely((int)skb->truesize < len))
+-              skb_truesize_bug(skb);
+-}
+ extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
+                       int getfrag(void *from, char *to, int offset,
+--- kernel-maemo-2.6.28.test.orig/include/linux/slab_def.h
++++ kernel-maemo-2.6.28.test/include/linux/slab_def.h
+@@ -43,10 +43,7 @@
+                       i++;
+ #include <linux/kmalloc_sizes.h>
+ #undef CACHE
+-              {
+-                      extern void __you_cannot_kmalloc_that_much(void);
+-                      __you_cannot_kmalloc_that_much();
+-              }
++              return NULL;
+ found:
+ #ifdef CONFIG_ZONE_DMA
+               if (flags & GFP_DMA)
+@@ -77,10 +74,7 @@
+                       i++;
+ #include <linux/kmalloc_sizes.h>
+ #undef CACHE
+-              {
+-                      extern void __you_cannot_kmalloc_that_much(void);
+-                      __you_cannot_kmalloc_that_much();
+-              }
++              return NULL;
+ found:
+ #ifdef CONFIG_ZONE_DMA
+               if (flags & GFP_DMA)
+--- kernel-maemo-2.6.28.test.orig/include/linux/syscalls.h
++++ kernel-maemo-2.6.28.test/include/linux/syscalls.h
+@@ -54,6 +54,7 @@
+ struct compat_timeval;
+ struct robust_list_head;
+ struct getcpu_cache;
++struct old_linux_dirent;
+ #include <linux/types.h>
+ #include <linux/aio_abi.h>
+@@ -65,6 +66,74 @@
+ #include <linux/quota.h>
+ #include <linux/key.h>
++#define __SC_DECL1(t1, a1)    t1 a1
++#define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__)
++#define __SC_DECL3(t3, a3, ...) t3 a3, __SC_DECL2(__VA_ARGS__)
++#define __SC_DECL4(t4, a4, ...) t4 a4, __SC_DECL3(__VA_ARGS__)
++#define __SC_DECL5(t5, a5, ...) t5 a5, __SC_DECL4(__VA_ARGS__)
++#define __SC_DECL6(t6, a6, ...) t6 a6, __SC_DECL5(__VA_ARGS__)
++
++#define __SC_LONG1(t1, a1)    long a1
++#define __SC_LONG2(t2, a2, ...) long a2, __SC_LONG1(__VA_ARGS__)
++#define __SC_LONG3(t3, a3, ...) long a3, __SC_LONG2(__VA_ARGS__)
++#define __SC_LONG4(t4, a4, ...) long a4, __SC_LONG3(__VA_ARGS__)
++#define __SC_LONG5(t5, a5, ...) long a5, __SC_LONG4(__VA_ARGS__)
++#define __SC_LONG6(t6, a6, ...) long a6, __SC_LONG5(__VA_ARGS__)
++
++#define __SC_CAST1(t1, a1)    (t1) a1
++#define __SC_CAST2(t2, a2, ...) (t2) a2, __SC_CAST1(__VA_ARGS__)
++#define __SC_CAST3(t3, a3, ...) (t3) a3, __SC_CAST2(__VA_ARGS__)
++#define __SC_CAST4(t4, a4, ...) (t4) a4, __SC_CAST3(__VA_ARGS__)
++#define __SC_CAST5(t5, a5, ...) (t5) a5, __SC_CAST4(__VA_ARGS__)
++#define __SC_CAST6(t6, a6, ...) (t6) a6, __SC_CAST5(__VA_ARGS__)
++
++#define __SC_TEST(type)               BUILD_BUG_ON(sizeof(type) > sizeof(long))
++#define __SC_TEST1(t1, a1)    __SC_TEST(t1)
++#define __SC_TEST2(t2, a2, ...)       __SC_TEST(t2); __SC_TEST1(__VA_ARGS__)
++#define __SC_TEST3(t3, a3, ...)       __SC_TEST(t3); __SC_TEST2(__VA_ARGS__)
++#define __SC_TEST4(t4, a4, ...)       __SC_TEST(t4); __SC_TEST3(__VA_ARGS__)
++#define __SC_TEST5(t5, a5, ...)       __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
++#define __SC_TEST6(t6, a6, ...)       __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
++
++#define SYSCALL_DEFINE0(name)    asmlinkage long sys_##name(void)
++#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
++#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
++#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
++#define SYSCALL_DEFINE4(name, ...) SYSCALL_DEFINEx(4, _##name, __VA_ARGS__)
++#define SYSCALL_DEFINE5(name, ...) SYSCALL_DEFINEx(5, _##name, __VA_ARGS__)
++#define SYSCALL_DEFINE6(name, ...) SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
++
++#ifdef CONFIG_PPC64
++#define SYSCALL_ALIAS(alias, name)                                    \
++      asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n"      \
++           "\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
++#else
++#define SYSCALL_ALIAS(alias, name)                                    \
++      asm ("\t.globl " #alias "\n\t.set " #alias ", " #name)
++#endif
++
++#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
++
++#define SYSCALL_DEFINE(name) static inline long SYSC_##name
++#define SYSCALL_DEFINEx(x, name, ...)                                 \
++      asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__));           \
++      static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__));       \
++      asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__))            \
++      {                                                               \
++              __SC_TEST##x(__VA_ARGS__);                              \
++              return (long) SYSC##name(__SC_CAST##x(__VA_ARGS__));    \
++      }                                                               \
++      SYSCALL_ALIAS(sys##name, SyS##name);                            \
++      static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__))
++
++#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */
++
++#define SYSCALL_DEFINE(name) asmlinkage long sys_##name
++#define SYSCALL_DEFINEx(x, name, ...)                                 \
++      asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__))
++
++#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */
++
+ asmlinkage long sys_time(time_t __user *tloc);
+ asmlinkage long sys_stime(time_t __user *tptr);
+ asmlinkage long sys_gettimeofday(struct timeval __user *tv,
+@@ -77,7 +146,7 @@
+ asmlinkage long sys_gettid(void);
+ asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp);
+-asmlinkage unsigned long sys_alarm(unsigned int seconds);
++asmlinkage long sys_alarm(unsigned int seconds);
+ asmlinkage long sys_getpid(void);
+ asmlinkage long sys_getppid(void);
+ asmlinkage long sys_getuid(void);
+@@ -166,7 +235,7 @@
+                               unsigned long flags);
+ asmlinkage long sys_exit(int error_code);
+-asmlinkage void sys_exit_group(int error_code);
++asmlinkage long sys_exit_group(int error_code);
+ asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
+                               int options, struct rusage __user *ru);
+ asmlinkage long sys_waitid(int which, pid_t pid,
+@@ -196,7 +265,7 @@
+ asmlinkage long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo);
+ asmlinkage long sys_sgetmask(void);
+ asmlinkage long sys_ssetmask(int newmask);
+-asmlinkage unsigned long sys_signal(int sig, __sighandler_t handler);
++asmlinkage long sys_signal(int sig, __sighandler_t handler);
+ asmlinkage long sys_pause(void);
+ asmlinkage long sys_sync(void);
+@@ -246,29 +315,29 @@
+                             const void __user *value, size_t size, int flags);
+ asmlinkage long sys_fsetxattr(int fd, const char __user *name,
+                             const void __user *value, size_t size, int flags);
+-asmlinkage ssize_t sys_getxattr(const char __user *path, const char __user *name,
+-                              void __user *value, size_t size);
+-asmlinkage ssize_t sys_lgetxattr(const char __user *path, const char __user *name,
+-                              void __user *value, size_t size);
+-asmlinkage ssize_t sys_fgetxattr(int fd, const char __user *name,
+-                              void __user *value, size_t size);
+-asmlinkage ssize_t sys_listxattr(const char __user *path, char __user *list,
+-                              size_t size);
+-asmlinkage ssize_t sys_llistxattr(const char __user *path, char __user *list,
+-                              size_t size);
+-asmlinkage ssize_t sys_flistxattr(int fd, char __user *list, size_t size);
++asmlinkage long sys_getxattr(const char __user *path, const char __user *name,
++                           void __user *value, size_t size);
++asmlinkage long sys_lgetxattr(const char __user *path, const char __user *name,
++                            void __user *value, size_t size);
++asmlinkage long sys_fgetxattr(int fd, const char __user *name,
++                            void __user *value, size_t size);
++asmlinkage long sys_listxattr(const char __user *path, char __user *list,
++                            size_t size);
++asmlinkage long sys_llistxattr(const char __user *path, char __user *list,
++                             size_t size);
++asmlinkage long sys_flistxattr(int fd, char __user *list, size_t size);
+ asmlinkage long sys_removexattr(const char __user *path,
+                               const char __user *name);
+ asmlinkage long sys_lremovexattr(const char __user *path,
+                                const char __user *name);
+ asmlinkage long sys_fremovexattr(int fd, const char __user *name);
+-asmlinkage unsigned long sys_brk(unsigned long brk);
++asmlinkage long sys_brk(unsigned long brk);
+ asmlinkage long sys_mprotect(unsigned long start, size_t len,
+                               unsigned long prot);
+-asmlinkage unsigned long sys_mremap(unsigned long addr,
+-                              unsigned long old_len, unsigned long new_len,
+-                              unsigned long flags, unsigned long new_addr);
++asmlinkage long sys_mremap(unsigned long addr,
++                         unsigned long old_len, unsigned long new_len,
++                         unsigned long flags, unsigned long new_addr);
+ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
+                       unsigned long prot, unsigned long pgoff,
+                       unsigned long flags);
+@@ -321,10 +390,10 @@
+                               struct iocb __user * __user *);
+ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
+                             struct io_event __user *result);
+-asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd,
+-                              off_t __user *offset, size_t count);
+-asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd,
+-                              loff_t __user *offset, size_t count);
++asmlinkage long sys_sendfile(int out_fd, int in_fd,
++                           off_t __user *offset, size_t count);
++asmlinkage long sys_sendfile64(int out_fd, int in_fd,
++                             loff_t __user *offset, size_t count);
+ asmlinkage long sys_readlink(const char __user *path,
+                               char __user *buf, int bufsiz);
+ asmlinkage long sys_creat(const char __user *pathname, int mode);
+@@ -368,26 +437,25 @@
+                               struct utimbuf __user *times);
+ asmlinkage long sys_utimes(char __user *filename,
+                               struct timeval __user *utimes);
+-asmlinkage off_t sys_lseek(unsigned int fd, off_t offset,
+-                              unsigned int origin);
++asmlinkage long sys_lseek(unsigned int fd, off_t offset,
++                        unsigned int origin);
+ asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
+                       unsigned long offset_low, loff_t __user *result,
+                       unsigned int origin);
+-asmlinkage ssize_t sys_read(unsigned int fd, char __user *buf,
+-                              size_t count);
+-asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count);
+-asmlinkage ssize_t sys_readv(unsigned long fd,
+-                              const struct iovec __user *vec,
+-                              unsigned long vlen);
+-asmlinkage ssize_t sys_write(unsigned int fd, const char __user *buf,
+-                              size_t count);
+-asmlinkage ssize_t sys_writev(unsigned long fd,
+-                              const struct iovec __user *vec,
+-                              unsigned long vlen);
+-asmlinkage ssize_t sys_pread64(unsigned int fd, char __user *buf,
+-                              size_t count, loff_t pos);
+-asmlinkage ssize_t sys_pwrite64(unsigned int fd, const char __user *buf,
+-                              size_t count, loff_t pos);
++asmlinkage long sys_read(unsigned int fd, char __user *buf, size_t count);
++asmlinkage long sys_readahead(int fd, loff_t offset, size_t count);
++asmlinkage long sys_readv(unsigned long fd,
++                        const struct iovec __user *vec,
++                        unsigned long vlen);
++asmlinkage long sys_write(unsigned int fd, const char __user *buf,
++                        size_t count);
++asmlinkage long sys_writev(unsigned long fd,
++                         const struct iovec __user *vec,
++                         unsigned long vlen);
++asmlinkage long sys_pread64(unsigned int fd, char __user *buf,
++                          size_t count, loff_t pos);
++asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf,
++                           size_t count, loff_t pos);
+ asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
+ asmlinkage long sys_mkdir(const char __user *pathname, int mode);
+ asmlinkage long sys_chdir(const char __user *filename);
+@@ -476,7 +544,7 @@
+ asmlinkage long sys_mq_open(const char __user *name, int oflag, mode_t mode, struct mq_attr __user *attr);
+ asmlinkage long sys_mq_unlink(const char __user *name);
+ asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec __user *abs_timeout);
+-asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout);
++asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout);
+ asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification);
+ asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat);
+@@ -530,11 +598,6 @@
+                               const int __user *nodes,
+                               int __user *status,
+                               int flags);
+-asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_page,
+-                              __u32 __user *pages,
+-                              const int __user *nodes,
+-                              int __user *status,
+-                              int flags);
+ asmlinkage long sys_mbind(unsigned long start, unsigned long len,
+                               unsigned long mode,
+                               unsigned long __user *nmask,
+@@ -549,7 +612,7 @@
+ asmlinkage long sys_inotify_init1(int flags);
+ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path,
+                                       u32 mask);
+-asmlinkage long sys_inotify_rm_watch(int fd, u32 wd);
++asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd);
+ asmlinkage long sys_spu_run(int fd, __u32 __user *unpc,
+                                __u32 __user *ustatus);
+@@ -583,13 +646,6 @@
+                              int bufsiz);
+ asmlinkage long sys_utimensat(int dfd, char __user *filename,
+                               struct timespec __user *utimes, int flags);
+-asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename,
+-                                   struct compat_timeval __user *t);
+-asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
+-                                    struct compat_stat __user *statbuf,
+-                                    int flag);
+-asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
+-                                 int flags, int mode);
+ asmlinkage long sys_unshare(unsigned long unshare_flags);
+ asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
+@@ -621,6 +677,15 @@
+ asmlinkage long sys_eventfd(unsigned int count);
+ asmlinkage long sys_eventfd2(unsigned int count, int flags);
+ asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
++asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
++asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
++                           fd_set __user *, struct timespec __user *,
++                           void __user *);
++asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
++                        struct timespec __user *, const sigset_t __user *,
++                        size_t);
++asmlinkage long sys_pipe2(int __user *, int);
++asmlinkage long sys_pipe(int __user *);
+ int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
+--- kernel-maemo-2.6.28.test.orig/include/linux/time.h
++++ kernel-maemo-2.6.28.test/include/linux/time.h
+@@ -105,6 +105,7 @@
+ extern int update_persistent_clock(struct timespec now);
+ extern int no_sync_cmos_clock __read_mostly;
+ void timekeeping_init(void);
++extern int timekeeping_suspended;
+ unsigned long get_seconds(void);
+ struct timespec current_kernel_time(void);
+--- kernel-maemo-2.6.28.test.orig/include/linux/timerfd.h
++++ kernel-maemo-2.6.28.test/include/linux/timerfd.h
+@@ -11,13 +11,21 @@
+ /* For O_CLOEXEC and O_NONBLOCK */
+ #include <linux/fcntl.h>
+-/* Flags for timerfd_settime.  */
++/*
++ * CAREFUL: Check include/asm-generic/fcntl.h when defining
++ * new flags, since they might collide with O_* ones. We want
++ * to re-use O_* flags that couldn't possibly have a meaning
++ * from eventfd, in order to leave a free define-space for
++ * shared O_* flags.
++ */
+ #define TFD_TIMER_ABSTIME (1 << 0)
+-
+-/* Flags for timerfd_create.  */
+ #define TFD_CLOEXEC O_CLOEXEC
+ #define TFD_NONBLOCK O_NONBLOCK
++#define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK)
++/* Flags for timerfd_create.  */
++#define TFD_CREATE_FLAGS TFD_SHARED_FCNTL_FLAGS
++/* Flags for timerfd_settime.  */
++#define TFD_SETTIME_FLAGS TFD_TIMER_ABSTIME
+ #endif /* _LINUX_TIMERFD_H */
+-
+--- kernel-maemo-2.6.28.test.orig/include/linux/usb/quirks.h
++++ kernel-maemo-2.6.28.test/include/linux/usb/quirks.h
+@@ -16,4 +16,7 @@
+ /* device can't handle Set-Interface requests */
+ #define USB_QUIRK_NO_SET_INTF         0x00000004
++/* device can't handle its Configuration or Interface strings */
++#define USB_QUIRK_CONFIG_INTF_STRINGS 0x00000008
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+--- kernel-maemo-2.6.28.test.orig/include/linux/usb_usual.h
++++ kernel-maemo-2.6.28.test/include/linux/usb_usual.h
+@@ -52,8 +52,9 @@
+       US_FLAG(MAX_SECTORS_MIN,0x00002000)                     \
+               /* Sets max_sectors to arch min */              \
+       US_FLAG(BULK_IGNORE_TAG,0x00004000)                     \
+-              /* Ignore tag mismatch in bulk operations */
+-
++              /* Ignore tag mismatch in bulk operations */    \
++      US_FLAG(CAPACITY_OK,    0x00010000)                     \
++              /* READ CAPACITY response is correct */
+ #define US_FLAG(name, value)  US_FL_##name = value ,
+ enum { US_DO_ALL_FLAGS };
+--- kernel-maemo-2.6.28.test.orig/include/linux/wait.h
++++ kernel-maemo-2.6.28.test/include/linux/wait.h
+@@ -132,6 +132,8 @@
+       list_del(&old->task_list);
+ }
++void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
++                      int nr_exclusive, int sync, void *key);
+ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
+ extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
+ extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
+@@ -333,16 +335,19 @@
+       for (;;) {                                                      \
+               prepare_to_wait_exclusive(&wq, &__wait,                 \
+                                       TASK_INTERRUPTIBLE);            \
+-              if (condition)                                          \
++              if (condition) {                                        \
++                      finish_wait(&wq, &__wait);                      \
+                       break;                                          \
++              }                                                       \
+               if (!signal_pending(current)) {                         \
+                       schedule();                                     \
+                       continue;                                       \
+               }                                                       \
+               ret = -ERESTARTSYS;                                     \
++              abort_exclusive_wait(&wq, &__wait,                      \
++                              TASK_INTERRUPTIBLE, NULL);              \
+               break;                                                  \
+       }                                                               \
+-      finish_wait(&wq, &__wait);                                      \
+ } while (0)
+ #define wait_event_interruptible_exclusive(wq, condition)             \
+@@ -431,6 +436,8 @@
+ void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
+ void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
+ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
++void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
++                      unsigned int mode, void *key);
+ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+--- kernel-maemo-2.6.28.test.orig/include/linux/writeback.h
++++ kernel-maemo-2.6.28.test/include/linux/writeback.h
+@@ -30,7 +30,6 @@
+ enum writeback_sync_modes {
+       WB_SYNC_NONE,   /* Don't wait on anything */
+       WB_SYNC_ALL,    /* Wait on every mapping */
+-      WB_SYNC_HOLD,   /* Hold the inode on sb_dirty for sys_sync() */
+ };
+ /*
+--- kernel-maemo-2.6.28.test.orig/include/net/sctp/checksum.h
++++ kernel-maemo-2.6.28.test/include/net/sctp/checksum.h
+@@ -79,5 +79,5 @@
+ static inline __be32 sctp_end_cksum(__be32 crc32)
+ {
+-      return ~crc32;
++      return (__force __be32)~cpu_to_le32((__force u32)crc32);
+ }
+--- kernel-maemo-2.6.28.test.orig/include/net/sock.h
++++ kernel-maemo-2.6.28.test/include/net/sock.h
+@@ -784,7 +784,6 @@
+ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
+ {
+-      skb_truesize_check(skb);
+       sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+       sk->sk_wmem_queued -= skb->truesize;
+       sk_mem_uncharge(sk, skb->truesize);
+--- kernel-maemo-2.6.28.test.orig/init/Kconfig
++++ kernel-maemo-2.6.28.test/init/Kconfig
+@@ -538,6 +538,9 @@
+ config SYSCTL
+       bool
++config ANON_INODES
++      bool
++
+ menuconfig EMBEDDED
+       bool "Configure standard kernel features (for small systems)"
+       help
+@@ -643,18 +646,6 @@
+           This option allows to disable the internal PC-Speaker
+           support, saving some memory.
+-config COMPAT_BRK
+-      bool "Disable heap randomization"
+-      default y
+-      help
+-        Randomizing heap placement makes heap exploits harder, but it
+-        also breaks ancient binaries (including anything libc5 based).
+-        This option changes the bootup default to heap randomization
+-        disabled, and can be overriden runtime by setting
+-        /proc/sys/kernel/randomize_va_space to 2.
+-
+-        On non-ancient distros (post-2000 ones) N is usually a safe choice.
+-
+ config BASE_FULL
+       default y
+       bool "Enable full-sized data structures for core" if EMBEDDED
+@@ -672,9 +663,6 @@
+         support for "fast userspace mutexes".  The resulting kernel may not
+         run glibc-based applications correctly.
+-config ANON_INODES
+-      bool
+-
+ config EPOLL
+       bool "Enable eventpoll support" if EMBEDDED
+       default y
+@@ -760,6 +748,18 @@
+         SLUB sysfs support. /sys/slab will not exist and there will be
+         no support for cache validation etc.
++config COMPAT_BRK
++      bool "Disable heap randomization"
++      default y
++      help
++        Randomizing heap placement makes heap exploits harder, but it
++        also breaks ancient binaries (including anything libc5 based).
++        This option changes the bootup default to heap randomization
++        disabled, and can be overriden runtime by setting
++        /proc/sys/kernel/randomize_va_space to 2.
++
++        On non-ancient distros (post-2000 ones) N is usually a safe choice.
++
+ choice
+       prompt "Choose SLAB allocator"
+       default SLUB
+--- kernel-maemo-2.6.28.test.orig/ipc/mqueue.c
++++ kernel-maemo-2.6.28.test/ipc/mqueue.c
+@@ -506,7 +506,8 @@
+                       sig_i.si_errno = 0;
+                       sig_i.si_code = SI_MESGQ;
+                       sig_i.si_value = info->notify.sigev_value;
+-                      sig_i.si_pid = task_tgid_vnr(current);
++                      sig_i.si_pid = task_tgid_nr_ns(current,
++                                              ns_of_pid(info->notify_owner));
+                       sig_i.si_uid = current->uid;
+                       kill_pid_info(info->notify.sigev_signo,
+@@ -655,8 +656,8 @@
+       return dentry_open(dentry, mqueue_mnt, oflag);
+ }
+-asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
+-                              struct mq_attr __user *u_attr)
++SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
++              struct mq_attr __user *, u_attr)
+ {
+       struct dentry *dentry;
+       struct file *filp;
+@@ -723,7 +724,7 @@
+       return fd;
+ }
+-asmlinkage long sys_mq_unlink(const char __user *u_name)
++SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
+ {
+       int err;
+       char *name;
+@@ -816,9 +817,9 @@
+       sender->state = STATE_READY;
+ }
+-asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
+-      size_t msg_len, unsigned int msg_prio,
+-      const struct timespec __user *u_abs_timeout)
++SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
++              size_t, msg_len, unsigned int, msg_prio,
++              const struct timespec __user *, u_abs_timeout)
+ {
+       struct file *filp;
+       struct inode *inode;
+@@ -904,9 +905,9 @@
+       return ret;
+ }
+-asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
+-      size_t msg_len, unsigned int __user *u_msg_prio,
+-      const struct timespec __user *u_abs_timeout)
++SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
++              size_t, msg_len, unsigned int __user *, u_msg_prio,
++              const struct timespec __user *, u_abs_timeout)
+ {
+       long timeout;
+       ssize_t ret;
+@@ -989,8 +990,8 @@
+  * and he isn't currently owner of notification, will be silently discarded.
+  * It isn't explicitly defined in the POSIX.
+  */
+-asmlinkage long sys_mq_notify(mqd_t mqdes,
+-                              const struct sigevent __user *u_notification)
++SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
++              const struct sigevent __user *, u_notification)
+ {
+       int ret;
+       struct file *filp;
+@@ -1115,9 +1116,9 @@
+       return ret;
+ }
+-asmlinkage long sys_mq_getsetattr(mqd_t mqdes,
+-                      const struct mq_attr __user *u_mqstat,
+-                      struct mq_attr __user *u_omqstat)
++SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
++              const struct mq_attr __user *, u_mqstat,
++              struct mq_attr __user *, u_omqstat)
+ {
+       int ret;
+       struct mq_attr mqstat, omqstat;
+--- kernel-maemo-2.6.28.test.orig/ipc/msg.c
++++ kernel-maemo-2.6.28.test/ipc/msg.c
+@@ -309,7 +309,7 @@
+       return security_msg_queue_associate(msq, msgflg);
+ }
+-asmlinkage long sys_msgget(key_t key, int msgflg)
++SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
+ {
+       struct ipc_namespace *ns;
+       struct ipc_ops msg_ops;
+@@ -466,7 +466,7 @@
+       return err;
+ }
+-asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
++SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
+ {
+       struct msg_queue *msq;
+       int err, version;
+@@ -723,8 +723,8 @@
+       return err;
+ }
+-asmlinkage long
+-sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
++SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
++              int, msgflg)
+ {
+       long mtype;
+@@ -904,8 +904,8 @@
+       return msgsz;
+ }
+-asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
+-                         long msgtyp, int msgflg)
++SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
++              long, msgtyp, int, msgflg)
+ {
+       long err, mtype;
+--- kernel-maemo-2.6.28.test.orig/ipc/sem.c
++++ kernel-maemo-2.6.28.test/ipc/sem.c
+@@ -308,7 +308,7 @@
+       return 0;
+ }
+-asmlinkage long sys_semget(key_t key, int nsems, int semflg)
++SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
+ {
+       struct ipc_namespace *ns;
+       struct ipc_ops sem_ops;
+@@ -887,7 +887,7 @@
+       return err;
+ }
+-asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
++SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
+ {
+       int err = -EINVAL;
+       int version;
+@@ -923,6 +923,13 @@
+               return -EINVAL;
+       }
+ }
++#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
++asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
++{
++      return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
++}
++SYSCALL_ALIAS(sys_semctl, SyS_semctl);
++#endif
+ /* If the task doesn't already have a undo_list, then allocate one
+  * here.  We guarantee there is only one thread using this undo list,
+@@ -1048,8 +1055,8 @@
+       return un;
+ }
+-asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
+-                      unsigned nsops, const struct timespec __user *timeout)
++SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
++              unsigned, nsops, const struct timespec __user *, timeout)
+ {
+       int error = -EINVAL;
+       struct sem_array *sma;
+@@ -1226,7 +1233,8 @@
+       return error;
+ }
+-asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops)
++SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
++              unsigned, nsops)
+ {
+       return sys_semtimedop(semid, tsops, nsops, NULL);
+ }
+--- kernel-maemo-2.6.28.test.orig/ipc/shm.c
++++ kernel-maemo-2.6.28.test/ipc/shm.c
+@@ -440,7 +440,7 @@
+       return 0;
+ }
+-asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
++SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
+ {
+       struct ipc_namespace *ns;
+       struct ipc_ops shm_ops;
+@@ -565,11 +565,15 @@
+                       struct hstate *h = hstate_file(shp->shm_file);
+                       *rss += pages_per_huge_page(h) * mapping->nrpages;
+               } else {
++#ifdef CONFIG_SHMEM
+                       struct shmem_inode_info *info = SHMEM_I(inode);
+                       spin_lock(&info->lock);
+                       *rss += inode->i_mapping->nrpages;
+                       *swp += info->swapped;
+                       spin_unlock(&info->lock);
++#else
++                      *rss += inode->i_mapping->nrpages;
++#endif
+               }
+               total++;
+@@ -621,7 +625,7 @@
+       return err;
+ }
+-asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
++SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+ {
+       struct shmid_kernel *shp;
+       int err, version;
+@@ -945,7 +949,7 @@
+       goto out_nattch;
+ }
+-asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
++SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
+ {
+       unsigned long ret;
+       long err;
+@@ -961,7 +965,7 @@
+  * detach and kill segment if marked destroyed.
+  * The work is done in shm_close.
+  */
+-asmlinkage long sys_shmdt(char __user *shmaddr)
++SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
+ {
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma, *next;
+--- kernel-maemo-2.6.28.test.orig/kernel/acct.c
++++ kernel-maemo-2.6.28.test/kernel/acct.c
+@@ -277,7 +277,7 @@
+  * should be written. If the filename is NULL, accounting will be
+  * shutdown.
+  */
+-asmlinkage long sys_acct(const char __user *name)
++SYSCALL_DEFINE1(acct, const char __user *, name)
+ {
+       int error;
+--- kernel-maemo-2.6.28.test.orig/kernel/capability.c
++++ kernel-maemo-2.6.28.test/kernel/capability.c
+@@ -348,7 +348,7 @@
+  *
+  * Returns 0 on success and < 0 on error.
+  */
+-asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
++SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
+ {
+       int ret = 0;
+       pid_t pid;
+@@ -425,7 +425,7 @@
+  *
+  * Returns 0 on success and < 0 on error.
+  */
+-asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
++SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
+ {
+       struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
+       unsigned i, tocopy;
+--- kernel-maemo-2.6.28.test.orig/kernel/exec_domain.c
++++ kernel-maemo-2.6.28.test/kernel/exec_domain.c
+@@ -209,8 +209,7 @@
+ module_init(proc_execdomains_init);
+ #endif
+-asmlinkage long
+-sys_personality(u_long personality)
++SYSCALL_DEFINE1(personality, u_long, personality)
+ {
+       u_long old = current->personality;
+--- kernel-maemo-2.6.28.test.orig/kernel/exit.c
++++ kernel-maemo-2.6.28.test/kernel/exit.c
+@@ -942,8 +942,7 @@
+        */
+       if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
+           (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
+-           tsk->self_exec_id != tsk->parent_exec_id) &&
+-          !capable(CAP_KILL))
++           tsk->self_exec_id != tsk->parent_exec_id))
+               tsk->exit_signal = SIGCHLD;
+       signal = tracehook_notify_death(tsk, &cookie, group_dead);
+@@ -1143,7 +1142,7 @@
+ EXPORT_SYMBOL(complete_and_exit);
+-asmlinkage long sys_exit(int error_code)
++SYSCALL_DEFINE1(exit, int, error_code)
+ {
+       do_exit((error_code&0xff)<<8);
+ }
+@@ -1184,9 +1183,11 @@
+  * wait4()-ing process will get the correct exit code - even if this
+  * thread is not the thread group leader.
+  */
+-asmlinkage void sys_exit_group(int error_code)
++SYSCALL_DEFINE1(exit_group, int, error_code)
+ {
+       do_group_exit((error_code & 0xff) << 8);
++      /* NOTREACHED */
++      return 0;
+ }
+ static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
+@@ -1753,9 +1754,8 @@
+       return retval;
+ }
+-asmlinkage long sys_waitid(int which, pid_t upid,
+-                         struct siginfo __user *infop, int options,
+-                         struct rusage __user *ru)
++SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
++              infop, int, options, struct rusage __user *, ru)
+ {
+       struct pid *pid = NULL;
+       enum pid_type type;
+@@ -1794,8 +1794,8 @@
+       return ret;
+ }
+-asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
+-                        int options, struct rusage __user *ru)
++SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
++              int, options, struct rusage __user *, ru)
+ {
+       struct pid *pid = NULL;
+       enum pid_type type;
+@@ -1832,7 +1832,7 @@
+  * sys_waitpid() remains for compatibility. waitpid() should be
+  * implemented by calling sys_wait4() from libc.a.
+  */
+-asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
++SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
+ {
+       return sys_wait4(pid, stat_addr, options, NULL);
+ }
+--- kernel-maemo-2.6.28.test.orig/kernel/fork.c
++++ kernel-maemo-2.6.28.test/kernel/fork.c
+@@ -894,7 +894,7 @@
+       clear_freeze_flag(p);
+ }
+-asmlinkage long sys_set_tid_address(int __user *tidptr)
++SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
+ {
+       current->clear_child_tid = tidptr;
+@@ -1176,10 +1176,6 @@
+ #endif
+       clear_all_latency_tracing(p);
+-      /* Our parent execution domain becomes current domain
+-         These must match for thread signalling to apply */
+-      p->parent_exec_id = p->self_exec_id;
+-
+       /* ok, now we should be set up.. */
+       p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
+       p->pdeath_signal = 0;
+@@ -1217,10 +1213,13 @@
+               set_task_cpu(p, smp_processor_id());
+       /* CLONE_PARENT re-uses the old parent */
+-      if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
++      if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
+               p->real_parent = current->real_parent;
+-      else
++              p->parent_exec_id = current->parent_exec_id;
++      } else {
+               p->real_parent = current;
++              p->parent_exec_id = current->self_exec_id;
++      }
+       spin_lock(&current->sighand->siglock);
+@@ -1589,7 +1588,7 @@
+  * constructed. Here we are modifying the current, active,
+  * task_struct.
+  */
+-asmlinkage long sys_unshare(unsigned long unshare_flags)
++SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+ {
+       int err = 0;
+       struct fs_struct *fs, *new_fs = NULL;
+--- kernel-maemo-2.6.28.test.orig/kernel/futex.c
++++ kernel-maemo-2.6.28.test/kernel/futex.c
+@@ -1800,9 +1800,8 @@
+  * @head: pointer to the list-head
+  * @len: length of the list-head, as userspace expects
+  */
+-asmlinkage long
+-sys_set_robust_list(struct robust_list_head __user *head,
+-                  size_t len)
++SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
++              size_t, len)
+ {
+       if (!futex_cmpxchg_enabled)
+               return -ENOSYS;
+@@ -1823,9 +1822,9 @@
+  * @head_ptr: pointer to a list-head pointer, the kernel fills it in
+  * @len_ptr: pointer to a length field, the kernel fills in the header size
+  */
+-asmlinkage long
+-sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
+-                  size_t __user *len_ptr)
++SYSCALL_DEFINE3(get_robust_list, int, pid,
++              struct robust_list_head __user * __user *, head_ptr,
++              size_t __user *, len_ptr)
+ {
+       struct robust_list_head __user *head;
+       unsigned long ret;
+@@ -2039,9 +2038,9 @@
+ }
+-asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
+-                        struct timespec __user *utime, u32 __user *uaddr2,
+-                        u32 val3)
++SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
++              struct timespec __user *, utime, u32 __user *, uaddr2,
++              u32, val3)
+ {
+       struct timespec ts;
+       ktime_t t, *tp = NULL;
+--- kernel-maemo-2.6.28.test.orig/kernel/hrtimer.c
++++ kernel-maemo-2.6.28.test/kernel/hrtimer.c
+@@ -1630,8 +1630,8 @@
+       return ret;
+ }
+-asmlinkage long
+-sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
++SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
++              struct timespec __user *, rmtp)
+ {
+       struct timespec tu;
+--- kernel-maemo-2.6.28.test.orig/kernel/itimer.c
++++ kernel-maemo-2.6.28.test/kernel/itimer.c
+@@ -100,7 +100,7 @@
+       return 0;
+ }
+-asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
++SYSCALL_DEFINE2(getitimer, int, which, struct itimerval __user *, value)
+ {
+       int error = -EFAULT;
+       struct itimerval get_buffer;
+@@ -260,9 +260,8 @@
+       return it_old.it_value.tv_sec;
+ }
+-asmlinkage long sys_setitimer(int which,
+-                            struct itimerval __user *value,
+-                            struct itimerval __user *ovalue)
++SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
++              struct itimerval __user *, ovalue)
+ {
+       struct itimerval set_buffer, get_buffer;
+       int error;
+--- kernel-maemo-2.6.28.test.orig/kernel/kexec.c
++++ kernel-maemo-2.6.28.test/kernel/kexec.c
+@@ -934,9 +934,8 @@
+ static DEFINE_MUTEX(kexec_mutex);
+-asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
+-                              struct kexec_segment __user *segments,
+-                              unsigned long flags)
++SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
++              struct kexec_segment __user *, segments, unsigned long, flags)
+ {
+       struct kimage **dest_image, *image;
+       int result;
+--- kernel-maemo-2.6.28.test.orig/kernel/kprobes.c
++++ kernel-maemo-2.6.28.test/kernel/kprobes.c
+@@ -901,10 +901,8 @@
+               ri->rp = rp;
+               ri->task = current;
+-              if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+-                      spin_unlock_irqrestore(&rp->lock, flags);
++              if (rp->entry_handler && rp->entry_handler(ri, regs))
+                       return 0;
+-              }
+               arch_prepare_kretprobe(ri, regs);
+--- kernel-maemo-2.6.28.test.orig/kernel/module.c
++++ kernel-maemo-2.6.28.test/kernel/module.c
+@@ -743,8 +743,8 @@
+       mutex_lock(&module_mutex);
+ }
+-asmlinkage long
+-sys_delete_module(const char __user *name_user, unsigned int flags)
++SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
++              unsigned int, flags)
+ {
+       struct module *mod;
+       char name[MODULE_NAME_LEN];
+@@ -2288,10 +2288,8 @@
+ }
+ /* This is where the real work happens */
+-asmlinkage long
+-sys_init_module(void __user *umod,
+-              unsigned long len,
+-              const char __user *uargs)
++SYSCALL_DEFINE3(init_module, void __user *, umod,
++              unsigned long, len, const char __user *, uargs)
+ {
+       struct module *mod;
+       int ret = 0;
+--- kernel-maemo-2.6.28.test.orig/kernel/posix-cpu-timers.c
++++ kernel-maemo-2.6.28.test/kernel/posix-cpu-timers.c
+@@ -294,7 +294,7 @@
+               cpu->cpu = virt_ticks(p);
+               break;
+       case CPUCLOCK_SCHED:
+-              cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
++              cpu->sched = task_sched_runtime(p);
+               break;
+       }
+       return 0;
+@@ -310,18 +310,19 @@
+ {
+       struct task_cputime cputime;
+-      thread_group_cputime(p, &cputime);
+       switch (CPUCLOCK_WHICH(which_clock)) {
+       default:
+               return -EINVAL;
+       case CPUCLOCK_PROF:
++              thread_group_cputime(p, &cputime);
+               cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+               break;
+       case CPUCLOCK_VIRT:
++              thread_group_cputime(p, &cputime);
+               cpu->cpu = cputime.utime;
+               break;
+       case CPUCLOCK_SCHED:
+-              cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
++              cpu->sched = thread_group_sched_runtime(p);
+               break;
+       }
+       return 0;
+--- kernel-maemo-2.6.28.test.orig/kernel/posix-timers.c
++++ kernel-maemo-2.6.28.test/kernel/posix-timers.c
+@@ -477,10 +477,9 @@
+ /* Create a POSIX.1b interval timer. */
+-asmlinkage long
+-sys_timer_create(const clockid_t which_clock,
+-               struct sigevent __user *timer_event_spec,
+-               timer_t __user * created_timer_id)
++SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
++              struct sigevent __user *, timer_event_spec,
++              timer_t __user *, created_timer_id)
+ {
+       struct k_itimer *new_timer;
+       int error, new_timer_id;
+@@ -666,8 +665,8 @@
+ }
+ /* Get the time remaining on a POSIX.1b interval timer. */
+-asmlinkage long
+-sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
++SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
++              struct itimerspec __user *, setting)
+ {
+       struct k_itimer *timr;
+       struct itimerspec cur_setting;
+@@ -696,8 +695,7 @@
+  * the call back to do_schedule_next_timer().  So all we need to do is
+  * to pick up the frozen overrun.
+  */
+-asmlinkage long
+-sys_timer_getoverrun(timer_t timer_id)
++SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
+ {
+       struct k_itimer *timr;
+       int overrun;
+@@ -765,10 +763,9 @@
+ }
+ /* Set a POSIX.1b interval timer */
+-asmlinkage long
+-sys_timer_settime(timer_t timer_id, int flags,
+-                const struct itimerspec __user *new_setting,
+-                struct itimerspec __user *old_setting)
++SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
++              const struct itimerspec __user *, new_setting,
++              struct itimerspec __user *, old_setting)
+ {
+       struct k_itimer *timr;
+       struct itimerspec new_spec, old_spec;
+@@ -821,8 +818,7 @@
+ }
+ /* Delete a POSIX.1b interval timer. */
+-asmlinkage long
+-sys_timer_delete(timer_t timer_id)
++SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
+ {
+       struct k_itimer *timer;
+       unsigned long flags;
+@@ -910,8 +906,8 @@
+ }
+ EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
+-asmlinkage long sys_clock_settime(const clockid_t which_clock,
+-                                const struct timespec __user *tp)
++SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
++              const struct timespec __user *, tp)
+ {
+       struct timespec new_tp;
+@@ -923,8 +919,8 @@
+       return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
+ }
+-asmlinkage long
+-sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
++SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
++              struct timespec __user *,tp)
+ {
+       struct timespec kernel_tp;
+       int error;
+@@ -940,8 +936,8 @@
+ }
+-asmlinkage long
+-sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
++SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
++              struct timespec __user *, tp)
+ {
+       struct timespec rtn_tp;
+       int error;
+@@ -970,10 +966,9 @@
+                                which_clock);
+ }
+-asmlinkage long
+-sys_clock_nanosleep(const clockid_t which_clock, int flags,
+-                  const struct timespec __user *rqtp,
+-                  struct timespec __user *rmtp)
++SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
++              const struct timespec __user *, rqtp,
++              struct timespec __user *, rmtp)
+ {
+       struct timespec t;
+--- kernel-maemo-2.6.28.test.orig/kernel/printk.c
++++ kernel-maemo-2.6.28.test/kernel/printk.c
+@@ -386,7 +386,7 @@
+       return error;
+ }
+-asmlinkage long sys_syslog(int type, char __user *buf, int len)
++SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
+ {
+       return do_syslog(type, buf, len);
+ }
+@@ -749,11 +749,6 @@
+ #else
+-asmlinkage long sys_syslog(int type, char __user *buf, int len)
+-{
+-      return -ENOSYS;
+-}
+-
+ static void call_console_drivers(unsigned start, unsigned end)
+ {
+ }
+--- kernel-maemo-2.6.28.test.orig/kernel/ptrace.c
++++ kernel-maemo-2.6.28.test/kernel/ptrace.c
+@@ -545,7 +545,7 @@
+ #define arch_ptrace_attach(child)     do { } while (0)
+ #endif
+-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
++SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
+ {
+       struct task_struct *child;
+       long ret;
+--- kernel-maemo-2.6.28.test.orig/kernel/relay.c
++++ kernel-maemo-2.6.28.test/kernel/relay.c
+@@ -663,8 +663,10 @@
+       mutex_lock(&relay_channels_mutex);
+       /* Is chan already set up? */
+-      if (unlikely(chan->has_base_filename))
++      if (unlikely(chan->has_base_filename)) {
++              mutex_unlock(&relay_channels_mutex);
+               return -EEXIST;
++      }
+       chan->has_base_filename = 1;
+       chan->parent = parent;
+       curr_cpu = get_cpu();
+--- kernel-maemo-2.6.28.test.orig/kernel/resource.c
++++ kernel-maemo-2.6.28.test/kernel/resource.c
+@@ -853,6 +853,15 @@
+               if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
+                   PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
+                       continue;
++              /*
++               * if a resource is "BUSY", it's not a hardware resource
++               * but a driver mapping of such a resource; we don't want
++               * to warn for those; some drivers legitimately map only
++               * partial hardware resources. (example: vesafb)
++               */
++              if (p->flags & IORESOURCE_BUSY)
++                      continue;
++
+               printk(KERN_WARNING "resource map sanity check conflict: "
+                      "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
+                      (unsigned long long)addr,
+--- kernel-maemo-2.6.28.test.orig/kernel/sched.c
++++ kernel-maemo-2.6.28.test/kernel/sched.c
+@@ -4064,9 +4064,25 @@
+ EXPORT_PER_CPU_SYMBOL(kstat);
+ /*
+- * Return any ns on the sched_clock that have not yet been banked in
++ * Return any ns on the sched_clock that have not yet been accounted in
+  * @p in case that task is currently running.
++ *
++ * Called with task_rq_lock() held on @rq.
+  */
++static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
++{
++      u64 ns = 0;
++
++      if (task_current(rq, p)) {
++              update_rq_clock(rq);
++              ns = rq->clock - p->se.exec_start;
++              if ((s64)ns < 0)
++                      ns = 0;
++      }
++
++      return ns;
++}
++
+ unsigned long long task_delta_exec(struct task_struct *p)
+ {
+       unsigned long flags;
+@@ -4074,16 +4090,49 @@
+       u64 ns = 0;
+       rq = task_rq_lock(p, &flags);
++      ns = do_task_delta_exec(p, rq);
++      task_rq_unlock(rq, &flags);
+-      if (task_current(rq, p)) {
+-              u64 delta_exec;
++      return ns;
++}
+-              update_rq_clock(rq);
+-              delta_exec = rq->clock - p->se.exec_start;
+-              if ((s64)delta_exec > 0)
+-                      ns = delta_exec;
+-      }
++/*
++ * Return accounted runtime for the task.
++ * In case the task is currently running, return the runtime plus current's
++ * pending runtime that have not been accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++      unsigned long flags;
++      struct rq *rq;
++      u64 ns = 0;
++
++      rq = task_rq_lock(p, &flags);
++      ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
++      task_rq_unlock(rq, &flags);
++
++      return ns;
++}
++/*
++ * Return sum_exec_runtime for the thread group.
++ * In case the task is currently running, return the sum plus current's
++ * pending runtime that have not been accounted yet.
++ *
++ * Note that the thread group might have other running tasks as well,
++ * so the return value not includes other pending runtime that other
++ * running tasks might have.
++ */
++unsigned long long thread_group_sched_runtime(struct task_struct *p)
++{
++      struct task_cputime totals;
++      unsigned long flags;
++      struct rq *rq;
++      u64 ns;
++
++      rq = task_rq_lock(p, &flags);
++      thread_group_cputime(p, &totals);
++      ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
+       task_rq_unlock(rq, &flags);
+       return ns;
+@@ -4586,8 +4635,8 @@
+  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
+  * zero in this (rare) case, and we handle it by continuing to scan the queue.
+  */
+-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+-                           int nr_exclusive, int sync, void *key)
++void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
++                      int nr_exclusive, int sync, void *key)
+ {
+       wait_queue_t *curr, *next;
+@@ -5025,7 +5074,7 @@
+  * sys_setpriority is a more generic, but much slower function that
+  * does similar things.
+  */
+-asmlinkage long sys_nice(int increment)
++SYSCALL_DEFINE1(nice, int, increment)
+ {
+       long nice, retval;
+@@ -5317,8 +5366,8 @@
+  * @policy: new policy.
+  * @param: structure containing the new RT priority.
+  */
+-asmlinkage long
+-sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
++              struct sched_param __user *, param)
+ {
+       /* negative values for policy are not valid */
+       if (policy < 0)
+@@ -5332,7 +5381,7 @@
+  * @pid: the pid in question.
+  * @param: structure containing the new RT priority.
+  */
+-asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
+ {
+       return do_sched_setscheduler(pid, -1, param);
+ }
+@@ -5341,7 +5390,7 @@
+  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
+  * @pid: the pid in question.
+  */
+-asmlinkage long sys_sched_getscheduler(pid_t pid)
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
+ {
+       struct task_struct *p;
+       int retval;
+@@ -5366,7 +5415,7 @@
+  * @pid: the pid in question.
+  * @param: structure containing the RT priority.
+  */
+-asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
+ {
+       struct sched_param lp;
+       struct task_struct *p;
+@@ -5474,8 +5523,8 @@
+  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+  * @user_mask_ptr: user-space pointer to the new cpu mask
+  */
+-asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
+-                                    unsigned long __user *user_mask_ptr)
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++              unsigned long __user *, user_mask_ptr)
+ {
+       cpumask_t new_mask;
+       int retval;
+@@ -5519,8 +5568,8 @@
+  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+  * @user_mask_ptr: user-space pointer to hold the current cpu mask
+  */
+-asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
+-                                    unsigned long __user *user_mask_ptr)
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++              unsigned long __user *, user_mask_ptr)
+ {
+       int ret;
+       cpumask_t mask;
+@@ -5544,7 +5593,7 @@
+  * This function yields the current CPU to other tasks. If there are no
+  * other threads running on this CPU then this function will return.
+  */
+-asmlinkage long sys_sched_yield(void)
++SYSCALL_DEFINE0(sched_yield)
+ {
+       struct rq *rq = this_rq_lock();
+@@ -5685,7 +5734,7 @@
+  * this syscall returns the maximum rt_priority that can be used
+  * by a given scheduling class.
+  */
+-asmlinkage long sys_sched_get_priority_max(int policy)
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
+ {
+       int ret = -EINVAL;
+@@ -5710,7 +5759,7 @@
+  * this syscall returns the minimum rt_priority that can be used
+  * by a given scheduling class.
+  */
+-asmlinkage long sys_sched_get_priority_min(int policy)
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
+ {
+       int ret = -EINVAL;
+@@ -5735,8 +5784,8 @@
+  * this syscall writes the default timeslice value of a given process
+  * into the user-space timespec buffer. A value of '0' means infinity.
+  */
+-asmlinkage
+-long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++              struct timespec __user *, interval)
+ {
+       struct task_struct *p;
+       unsigned int time_slice;
+--- kernel-maemo-2.6.28.test.orig/kernel/sched_clock.c
++++ kernel-maemo-2.6.28.test/kernel/sched_clock.c
+@@ -124,7 +124,7 @@
+       clock = scd->tick_gtod + delta;
+       min_clock = wrap_max(scd->tick_gtod, scd->clock);
+-      max_clock = scd->tick_gtod + TICK_NSEC;
++      max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);
+       clock = wrap_max(clock, min_clock);
+       clock = wrap_min(clock, max_clock);
+@@ -227,6 +227,9 @@
+  */
+ void sched_clock_idle_wakeup_event(u64 delta_ns)
+ {
++      if (timekeeping_suspended)
++              return;
++
+       sched_clock_tick();
+       touch_softlockup_watchdog();
+ }
+--- kernel-maemo-2.6.28.test.orig/kernel/sched_fair.c
++++ kernel-maemo-2.6.28.test/kernel/sched_fair.c
+@@ -283,7 +283,7 @@
+                                                  struct sched_entity,
+                                                  run_node);
+-              if (vruntime == cfs_rq->min_vruntime)
++              if (!cfs_rq->curr)
+                       vruntime = se->vruntime;
+               else
+                       vruntime = min_vruntime(vruntime, se->vruntime);
+@@ -681,9 +681,13 @@
+                       unsigned long thresh = sysctl_sched_latency;
+                       /*
+-                       * convert the sleeper threshold into virtual time
++                       * Convert the sleeper threshold into virtual time.
++                       * SCHED_IDLE is a special sub-class.  We care about
++                       * fairness only relative to other SCHED_IDLE tasks,
++                       * all of which have the same weight.
+                        */
+-                      if (sched_feat(NORMALIZED_SLEEPER))
++                      if (sched_feat(NORMALIZED_SLEEPER) &&
++                                      task_of(se)->policy != SCHED_IDLE)
+                               thresh = calc_delta_fair(thresh, se);
+                       vruntime -= thresh;
+@@ -1328,14 +1332,18 @@
+ static void set_last_buddy(struct sched_entity *se)
+ {
+-      for_each_sched_entity(se)
+-              cfs_rq_of(se)->last = se;
++      if (likely(task_of(se)->policy != SCHED_IDLE)) {
++              for_each_sched_entity(se)
++                      cfs_rq_of(se)->last = se;
++      }
+ }
+ static void set_next_buddy(struct sched_entity *se)
+ {
+-      for_each_sched_entity(se)
+-              cfs_rq_of(se)->next = se;
++      if (likely(task_of(se)->policy != SCHED_IDLE)) {
++              for_each_sched_entity(se)
++                      cfs_rq_of(se)->next = se;
++      }
+ }
+ /*
+@@ -1382,12 +1390,18 @@
+               return;
+       /*
+-       * Batch tasks do not preempt (their preemption is driven by
++       * Batch and idle tasks do not preempt (their preemption is driven by
+        * the tick):
+        */
+-      if (unlikely(p->policy == SCHED_BATCH))
++      if (unlikely(p->policy != SCHED_NORMAL))
+               return;
++      /* Idle tasks are by definition preempted by everybody. */
++      if (unlikely(curr->policy == SCHED_IDLE)) {
++              resched_task(curr);
++              return;
++      }
++
+       if (!sched_feat(WAKEUP_PREEMPT))
+               return;
+--- kernel-maemo-2.6.28.test.orig/kernel/seccomp.c
++++ kernel-maemo-2.6.28.test/kernel/seccomp.c
+@@ -8,6 +8,7 @@
+ #include <linux/seccomp.h>
+ #include <linux/sched.h>
++#include <linux/compat.h>
+ /* #define SECCOMP_DEBUG 1 */
+ #define NR_SECCOMP_MODES 1
+@@ -22,7 +23,7 @@
+       0, /* null terminated */
+ };
+-#ifdef TIF_32BIT
++#ifdef CONFIG_COMPAT
+ static int mode1_syscalls_32[] = {
+       __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
+       0, /* null terminated */
+@@ -37,8 +38,8 @@
+       switch (mode) {
+       case 1:
+               syscall = mode1_syscalls;
+-#ifdef TIF_32BIT
+-              if (test_thread_flag(TIF_32BIT))
++#ifdef CONFIG_COMPAT
++              if (is_compat_task())
+                       syscall = mode1_syscalls_32;
+ #endif
+               do {
+--- kernel-maemo-2.6.28.test.orig/kernel/signal.c
++++ kernel-maemo-2.6.28.test/kernel/signal.c
+@@ -1552,7 +1552,15 @@
+       read_lock(&tasklist_lock);
+       if (may_ptrace_stop()) {
+               do_notify_parent_cldstop(current, CLD_TRAPPED);
++              /*
++               * Don't want to allow preemption here, because
++               * sys_ptrace() needs this task to be inactive.
++               *
++               * XXX: implement read_unlock_no_resched().
++               */
++              preempt_disable();
+               read_unlock(&tasklist_lock);
++              preempt_enable_no_resched();
+               schedule();
+       } else {
+               /*
+@@ -1940,7 +1948,7 @@
+  * System call entry points.
+  */
+-asmlinkage long sys_restart_syscall(void)
++SYSCALL_DEFINE0(restart_syscall)
+ {
+       struct restart_block *restart = &current_thread_info()->restart_block;
+       return restart->fn(restart);
+@@ -1993,8 +2001,8 @@
+       return error;
+ }
+-asmlinkage long
+-sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
++SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
++              sigset_t __user *, oset, size_t, sigsetsize)
+ {
+       int error = -EINVAL;
+       sigset_t old_set, new_set;
+@@ -2053,8 +2061,7 @@
+       return error;
+ }     
+-asmlinkage long
+-sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
++SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
+ {
+       return do_sigpending(set, sigsetsize);
+ }
+@@ -2125,11 +2132,9 @@
+ #endif
+-asmlinkage long
+-sys_rt_sigtimedwait(const sigset_t __user *uthese,
+-                  siginfo_t __user *uinfo,
+-                  const struct timespec __user *uts,
+-                  size_t sigsetsize)
++SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
++              siginfo_t __user *, uinfo, const struct timespec __user *, uts,
++              size_t, sigsetsize)
+ {
+       int ret, sig;
+       sigset_t these;
+@@ -2202,8 +2207,7 @@
+       return ret;
+ }
+-asmlinkage long
+-sys_kill(pid_t pid, int sig)
++SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
+ {
+       struct siginfo info;
+@@ -2262,7 +2266,7 @@
+  *  exists but it's not belonging to the target process anymore. This
+  *  method solves the problem of threads exiting and PIDs getting reused.
+  */
+-asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
++SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
+ {
+       /* This is only valid for single tasks */
+       if (pid <= 0 || tgid <= 0)
+@@ -2274,8 +2278,7 @@
+ /*
+  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
+  */
+-asmlinkage long
+-sys_tkill(pid_t pid, int sig)
++SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
+ {
+       /* This is only valid for single tasks */
+       if (pid <= 0)
+@@ -2284,8 +2287,8 @@
+       return do_tkill(0, pid, sig);
+ }
+-asmlinkage long
+-sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
++SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
++              siginfo_t __user *, uinfo)
+ {
+       siginfo_t info;
+@@ -2413,8 +2416,7 @@
+ #ifdef __ARCH_WANT_SYS_SIGPENDING
+-asmlinkage long
+-sys_sigpending(old_sigset_t __user *set)
++SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
+ {
+       return do_sigpending(set, sizeof(*set));
+ }
+@@ -2425,8 +2427,8 @@
+ /* Some platforms have their own version with special arguments others
+    support only sys_rt_sigprocmask.  */
+-asmlinkage long
+-sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
++SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
++              old_sigset_t __user *, oset)
+ {
+       int error;
+       old_sigset_t old_set, new_set;
+@@ -2476,11 +2478,10 @@
+ #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
+ #ifdef __ARCH_WANT_SYS_RT_SIGACTION
+-asmlinkage long
+-sys_rt_sigaction(int sig,
+-               const struct sigaction __user *act,
+-               struct sigaction __user *oact,
+-               size_t sigsetsize)
++SYSCALL_DEFINE4(rt_sigaction, int, sig,
++              const struct sigaction __user *, act,
++              struct sigaction __user *, oact,
++              size_t, sigsetsize)
+ {
+       struct k_sigaction new_sa, old_sa;
+       int ret = -EINVAL;
+@@ -2510,15 +2511,13 @@
+ /*
+  * For backwards compatibility.  Functionality superseded by sigprocmask.
+  */
+-asmlinkage long
+-sys_sgetmask(void)
++SYSCALL_DEFINE0(sgetmask)
+ {
+       /* SMP safe */
+       return current->blocked.sig[0];
+ }
+-asmlinkage long
+-sys_ssetmask(int newmask)
++SYSCALL_DEFINE1(ssetmask, int, newmask)
+ {
+       int old;
+@@ -2538,8 +2537,7 @@
+ /*
+  * For backwards compatibility.  Functionality superseded by sigaction.
+  */
+-asmlinkage unsigned long
+-sys_signal(int sig, __sighandler_t handler)
++SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
+ {
+       struct k_sigaction new_sa, old_sa;
+       int ret;
+@@ -2556,8 +2554,7 @@
+ #ifdef __ARCH_WANT_SYS_PAUSE
+-asmlinkage long
+-sys_pause(void)
++SYSCALL_DEFINE0(pause)
+ {
+       current->state = TASK_INTERRUPTIBLE;
+       schedule();
+@@ -2567,7 +2564,7 @@
+ #endif
+ #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
+-asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
++SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
+ {
+       sigset_t newset;
+--- kernel-maemo-2.6.28.test.orig/kernel/sys.c
++++ kernel-maemo-2.6.28.test/kernel/sys.c
+@@ -137,7 +137,7 @@
+       return error;
+ }
+-asmlinkage long sys_setpriority(int which, int who, int niceval)
++SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
+ {
+       struct task_struct *g, *p;
+       struct user_struct *user;
+@@ -201,7 +201,7 @@
+  * has been offset by 20 (ie it returns 40..1 instead of -20..19)
+  * to stay compatible.
+  */
+-asmlinkage long sys_getpriority(int which, int who)
++SYSCALL_DEFINE2(getpriority, int, which, int, who)
+ {
+       struct task_struct *g, *p;
+       struct user_struct *user;
+@@ -347,7 +347,8 @@
+  *
+  * reboot doesn't sync: do that yourself before calling this.
+  */
+-asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
++SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
++              void __user *, arg)
+ {
+       char buffer[256];
+@@ -470,7 +471,7 @@
+  * SMP: There are not races, the GIDs are checked only by filesystem
+  *      operations (as far as semantic preservation is concerned).
+  */
+-asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
++SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
+ {
+       int old_rgid = current->gid;
+       int old_egid = current->egid;
+@@ -519,7 +520,7 @@
+  *
+  * SMP: Same implicit races as above.
+  */
+-asmlinkage long sys_setgid(gid_t gid)
++SYSCALL_DEFINE1(setgid, gid_t, gid)
+ {
+       int old_egid = current->egid;
+       int retval;
+@@ -589,7 +590,7 @@
+  * 100% compatible with BSD.  A program which uses just setuid() will be
+  * 100% compatible with POSIX with saved IDs. 
+  */
+-asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
++SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
+ {
+       int old_ruid, old_euid, old_suid, new_ruid, new_euid;
+       int retval;
+@@ -651,7 +652,7 @@
+  * will allow a root program to temporarily drop privileges and be able to
+  * regain them by swapping the real and effective uid.  
+  */
+-asmlinkage long sys_setuid(uid_t uid)
++SYSCALL_DEFINE1(setuid, uid_t, uid)
+ {
+       int old_euid = current->euid;
+       int old_ruid, old_suid, new_suid;
+@@ -690,7 +691,7 @@
+  * This function implements a generic ability to update ruid, euid,
+  * and suid.  This allows you to implement the 4.4 compatible seteuid().
+  */
+-asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
++SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
+ {
+       int old_ruid = current->uid;
+       int old_euid = current->euid;
+@@ -733,7 +734,7 @@
+       return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
+ }
+-asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
++SYSCALL_DEFINE3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid)
+ {
+       int retval;
+@@ -747,7 +748,7 @@
+ /*
+  * Same as above, but for rgid, egid, sgid.
+  */
+-asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
++SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
+ {
+       int retval;
+@@ -784,7 +785,7 @@
+       return 0;
+ }
+-asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
++SYSCALL_DEFINE3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid)
+ {
+       int retval;
+@@ -802,7 +803,7 @@
+  * whatever uid it wants to). It normally shadows "euid", except when
+  * explicitly set by setfsuid() or for access..
+  */
+-asmlinkage long sys_setfsuid(uid_t uid)
++SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+ {
+       int old_fsuid;
+@@ -831,7 +832,7 @@
+ /*
+  * Samma på svenska..
+  */
+-asmlinkage long sys_setfsgid(gid_t gid)
++SYSCALL_DEFINE1(setfsgid, gid_t, gid)
+ {
+       int old_fsgid;
+@@ -869,7 +870,7 @@
+       tms->tms_cstime = cputime_to_clock_t(cstime);
+ }
+-asmlinkage long sys_times(struct tms __user * tbuf)
++SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
+ {
+       if (tbuf) {
+               struct tms tmp;
+@@ -893,7 +894,7 @@
+  * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
+  * LBT 04.03.94
+  */
+-asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
++SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
+ {
+       struct task_struct *p;
+       struct task_struct *group_leader = current->group_leader;
+@@ -964,7 +965,7 @@
+       return err;
+ }
+-asmlinkage long sys_getpgid(pid_t pid)
++SYSCALL_DEFINE1(getpgid, pid_t, pid)
+ {
+       struct task_struct *p;
+       struct pid *grp;
+@@ -994,14 +995,14 @@
+ #ifdef __ARCH_WANT_SYS_GETPGRP
+-asmlinkage long sys_getpgrp(void)
++SYSCALL_DEFINE0(getpgrp)
+ {
+       return sys_getpgid(0);
+ }
+ #endif
+-asmlinkage long sys_getsid(pid_t pid)
++SYSCALL_DEFINE1(getsid, pid_t, pid)
+ {
+       struct task_struct *p;
+       struct pid *sid;
+@@ -1029,7 +1030,7 @@
+       return retval;
+ }
+-asmlinkage long sys_setsid(void)
++SYSCALL_DEFINE0(setsid)
+ {
+       struct task_struct *group_leader = current->group_leader;
+       struct pid *sid = task_pid(group_leader);
+@@ -1233,7 +1234,7 @@
+ EXPORT_SYMBOL(set_current_groups);
+-asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
++SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist)
+ {
+       int i = 0;
+@@ -1266,7 +1267,7 @@
+  *    without another task interfering.
+  */
+  
+-asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
++SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
+ {
+       struct group_info *group_info;
+       int retval;
+@@ -1316,7 +1317,7 @@
+ DECLARE_RWSEM(uts_sem);
+-asmlinkage long sys_newuname(struct new_utsname __user * name)
++SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
+ {
+       int errno = 0;
+@@ -1327,7 +1328,7 @@
+       return errno;
+ }
+-asmlinkage long sys_sethostname(char __user *name, int len)
++SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
+ {
+       int errno;
+       char tmp[__NEW_UTS_LEN];
+@@ -1351,7 +1352,7 @@
+ #ifdef __ARCH_WANT_SYS_GETHOSTNAME
+-asmlinkage long sys_gethostname(char __user *name, int len)
++SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
+ {
+       int i, errno;
+       struct new_utsname *u;
+@@ -1376,7 +1377,7 @@
+  * Only setdomainname; getdomainname can be implemented by calling
+  * uname()
+  */
+-asmlinkage long sys_setdomainname(char __user *name, int len)
++SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
+ {
+       int errno;
+       char tmp[__NEW_UTS_LEN];
+@@ -1399,7 +1400,7 @@
+       return errno;
+ }
+-asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
++SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
+ {
+       if (resource >= RLIM_NLIMITS)
+               return -EINVAL;
+@@ -1418,7 +1419,8 @@
+  *    Back compatibility for getrlimit. Needed for some apps.
+  */
+  
+-asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
++SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
++              struct rlimit __user *, rlim)
+ {
+       struct rlimit x;
+       if (resource >= RLIM_NLIMITS)
+@@ -1436,7 +1438,7 @@
+ #endif
+-asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
++SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
+ {
+       struct rlimit new_rlim, *old_rlim;
+       int retval;
+@@ -1445,22 +1447,14 @@
+               return -EINVAL;
+       if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
+               return -EFAULT;
++      if (new_rlim.rlim_cur > new_rlim.rlim_max)
++              return -EINVAL;
+       old_rlim = current->signal->rlim + resource;
+       if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
+           !capable(CAP_SYS_RESOURCE))
+               return -EPERM;
+-
+-      if (resource == RLIMIT_NOFILE) {
+-              if (new_rlim.rlim_max == RLIM_INFINITY)
+-                      new_rlim.rlim_max = sysctl_nr_open;
+-              if (new_rlim.rlim_cur == RLIM_INFINITY)
+-                      new_rlim.rlim_cur = sysctl_nr_open;
+-              if (new_rlim.rlim_max > sysctl_nr_open)
+-                      return -EPERM;
+-      }
+-
+-      if (new_rlim.rlim_cur > new_rlim.rlim_max)
+-              return -EINVAL;
++      if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
++              return -EPERM;
+       retval = security_task_setrlimit(resource, &new_rlim);
+       if (retval)
+@@ -1551,6 +1545,8 @@
+       utime = stime = cputime_zero;
+       if (who == RUSAGE_THREAD) {
++              utime = task_utime(current);
++              stime = task_stime(current);
+               accumulate_thread_rusage(p, r);
+               goto out;
+       }
+@@ -1607,7 +1603,7 @@
+       return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
+ }
+-asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
++SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
+ {
+       if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
+           who != RUSAGE_THREAD)
+@@ -1615,14 +1611,14 @@
+       return getrusage(current, who, ru);
+ }
+-asmlinkage long sys_umask(int mask)
++SYSCALL_DEFINE1(umask, int, mask)
+ {
+       mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
+       return mask;
+ }
+-asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
+-                        unsigned long arg4, unsigned long arg5)
++SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
++              unsigned long, arg4, unsigned long, arg5)
+ {
+       long error = 0;
+@@ -1733,8 +1729,8 @@
+       return error;
+ }
+-asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
+-                         struct getcpu_cache __user *unused)
++SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
++              struct getcpu_cache __user *, unused)
+ {
+       int err = 0;
+       int cpu = raw_smp_processor_id();
+--- kernel-maemo-2.6.28.test.orig/kernel/sys_ni.c
++++ kernel-maemo-2.6.28.test/kernel/sys_ni.c
+@@ -131,6 +131,7 @@
+ cond_syscall(sys_io_submit);
+ cond_syscall(sys_io_cancel);
+ cond_syscall(sys_io_getevents);
++cond_syscall(sys_syslog);
+ /* arch-specific weak syscall entries */
+ cond_syscall(sys_pciconfig_read);
+--- kernel-maemo-2.6.28.test.orig/kernel/sysctl.c
++++ kernel-maemo-2.6.28.test/kernel/sysctl.c
+@@ -1623,7 +1623,7 @@
+       return error;
+ }
+-asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
++SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
+ {
+       struct __sysctl_args tmp;
+       int error;
+@@ -2924,7 +2924,7 @@
+ #else /* CONFIG_SYSCTL_SYSCALL */
+-asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
++SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
+ {
+       struct __sysctl_args tmp;
+       int error;
+--- kernel-maemo-2.6.28.test.orig/kernel/time.c
++++ kernel-maemo-2.6.28.test/kernel/time.c
+@@ -59,7 +59,7 @@
+  * why not move it into the appropriate arch directory (for those
+  * architectures that need it).
+  */
+-asmlinkage long sys_time(time_t __user * tloc)
++SYSCALL_DEFINE1(time, time_t __user *, tloc)
+ {
+       time_t i = get_seconds();
+@@ -77,7 +77,7 @@
+  * architectures that need it).
+  */
+-asmlinkage long sys_stime(time_t __user *tptr)
++SYSCALL_DEFINE1(stime, time_t __user *, tptr)
+ {
+       struct timespec tv;
+       int err;
+@@ -97,8 +97,8 @@
+ #endif /* __ARCH_WANT_SYS_TIME */
+-asmlinkage long sys_gettimeofday(struct timeval __user *tv,
+-                               struct timezone __user *tz)
++SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
++              struct timezone __user *, tz)
+ {
+       if (likely(tv != NULL)) {
+               struct timeval ktv;
+@@ -182,8 +182,8 @@
+       return 0;
+ }
+-asmlinkage long sys_settimeofday(struct timeval __user *tv,
+-                              struct timezone __user *tz)
++SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
++              struct timezone __user *, tz)
+ {
+       struct timeval user_tv;
+       struct timespec new_ts;
+@@ -203,7 +203,7 @@
+       return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
+ }
+-asmlinkage long sys_adjtimex(struct timex __user *txc_p)
++SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
+ {
+       struct timex txc;               /* Local copy of parameter */
+       int ret;
+--- kernel-maemo-2.6.28.test.orig/kernel/time/timekeeping.c
++++ kernel-maemo-2.6.28.test/kernel/time/timekeeping.c
+@@ -46,6 +46,9 @@
+ struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
+ static unsigned long total_sleep_time;                /* seconds */
++/* flag for if timekeeping is suspended */
++int __read_mostly timekeeping_suspended;
++
+ static struct timespec xtime_cache __attribute__ ((aligned (16)));
+ void update_xtime_cache(u64 nsec)
+ {
+@@ -92,6 +95,8 @@
+       unsigned long seq;
+       s64 nsecs;
++      WARN_ON(timekeeping_suspended);
++
+       do {
+               seq = read_seqbegin(&xtime_lock);
+@@ -299,8 +304,6 @@
+       write_sequnlock_irqrestore(&xtime_lock, flags);
+ }
+-/* flag for if timekeeping is suspended */
+-static int timekeeping_suspended;
+ /* time in seconds when suspend began */
+ static unsigned long timekeeping_suspend_time;
+--- kernel-maemo-2.6.28.test.orig/kernel/timer.c
++++ kernel-maemo-2.6.28.test/kernel/timer.c
+@@ -1147,7 +1147,7 @@
+  * For backwards compatibility?  This can be done in libc so Alpha
+  * and all newer ports shouldn't need it.
+  */
+-asmlinkage unsigned long sys_alarm(unsigned int seconds)
++SYSCALL_DEFINE1(alarm, unsigned int, seconds)
+ {
+       return alarm_setitimer(seconds);
+ }
+@@ -1170,7 +1170,7 @@
+  *
+  * This is SMP safe as current->tgid does not change.
+  */
+-asmlinkage long sys_getpid(void)
++SYSCALL_DEFINE0(getpid)
+ {
+       return task_tgid_vnr(current);
+ }
+@@ -1181,7 +1181,7 @@
+  * value of ->real_parent under rcu_read_lock(), see
+  * release_task()->call_rcu(delayed_put_task_struct).
+  */
+-asmlinkage long sys_getppid(void)
++SYSCALL_DEFINE0(getppid)
+ {
+       int pid;
+@@ -1192,25 +1192,25 @@
+       return pid;
+ }
+-asmlinkage long sys_getuid(void)
++SYSCALL_DEFINE0(getuid)
+ {
+       /* Only we change this so SMP safe */
+       return current->uid;
+ }
+-asmlinkage long sys_geteuid(void)
++SYSCALL_DEFINE0(geteuid)
+ {
+       /* Only we change this so SMP safe */
+       return current->euid;
+ }
+-asmlinkage long sys_getgid(void)
++SYSCALL_DEFINE0(getgid)
+ {
+       /* Only we change this so SMP safe */
+       return current->gid;
+ }
+-asmlinkage long sys_getegid(void)
++SYSCALL_DEFINE0(getegid)
+ {
+       /* Only we change this so SMP safe */
+       return  current->egid;
+@@ -1326,7 +1326,7 @@
+ EXPORT_SYMBOL(schedule_timeout_uninterruptible);
+ /* Thread ID - the internal kernel "pid" */
+-asmlinkage long sys_gettid(void)
++SYSCALL_DEFINE0(gettid)
+ {
+       return task_pid_vnr(current);
+ }
+@@ -1418,7 +1418,7 @@
+       return 0;
+ }
+-asmlinkage long sys_sysinfo(struct sysinfo __user *info)
++SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
+ {
+       struct sysinfo val;
+--- kernel-maemo-2.6.28.test.orig/kernel/trace/ring_buffer.c
++++ kernel-maemo-2.6.28.test/kernel/trace/ring_buffer.c
+@@ -769,6 +769,7 @@
+        * back to us). This allows us to do a simple loop to
+        * assign the commit to the tail.
+        */
++ again:
+       while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
+               cpu_buffer->commit_page->commit =
+                       cpu_buffer->commit_page->write;
+@@ -783,6 +784,17 @@
+                       cpu_buffer->commit_page->write;
+               barrier();
+       }
++
++      /* again, keep gcc from optimizing */
++      barrier();
++
++      /*
++       * If an interrupt came in just after the first while loop
++       * and pushed the tail page forward, we will be left with
++       * a dangling commit that will never go forward.
++       */
++      if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
++              goto again;
+ }
+ static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -880,12 +892,15 @@
+ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+                 unsigned type, unsigned long length, u64 *ts)
+ {
+-      struct buffer_page *tail_page, *head_page, *reader_page;
++      struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
+       unsigned long tail, write;
+       struct ring_buffer *buffer = cpu_buffer->buffer;
+       struct ring_buffer_event *event;
+       unsigned long flags;
++      commit_page = cpu_buffer->commit_page;
++      /* we just need to protect against interrupts */
++      barrier();
+       tail_page = cpu_buffer->tail_page;
+       write = local_add_return(length, &tail_page->write);
+       tail = write - length;
+@@ -909,7 +924,7 @@
+                * it all the way around the buffer, bail, and warn
+                * about it.
+                */
+-              if (unlikely(next_page == cpu_buffer->commit_page)) {
++              if (unlikely(next_page == commit_page)) {
+                       WARN_ON_ONCE(1);
+                       goto out_unlock;
+               }
+--- kernel-maemo-2.6.28.test.orig/kernel/tsacct.c
++++ kernel-maemo-2.6.28.test/kernel/tsacct.c
+@@ -120,8 +120,10 @@
+       if (likely(tsk->mm)) {
+               cputime_t time, dtime;
+               struct timeval value;
++              unsigned long flags;
+               u64 delta;
++              local_irq_save(flags);
+               time = tsk->stime + tsk->utime;
+               dtime = cputime_sub(time, tsk->acct_timexpd);
+               jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
+@@ -129,10 +131,12 @@
+               delta = delta * USEC_PER_SEC + value.tv_usec;
+               if (delta == 0)
+-                      return;
++                      goto out;
+               tsk->acct_timexpd = time;
+               tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
+               tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
++      out:
++              local_irq_restore(flags);
+       }
+ }
+--- kernel-maemo-2.6.28.test.orig/kernel/uid16.c
++++ kernel-maemo-2.6.28.test/kernel/uid16.c
+@@ -17,7 +17,7 @@
+ #include <asm/uaccess.h>
+-asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gid_t group)
++SYSCALL_DEFINE3(chown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
+ {
+       long ret = sys_chown(filename, low2highuid(user), low2highgid(group));
+       /* avoid REGPARM breakage on x86: */
+@@ -25,7 +25,7 @@
+       return ret;
+ }
+-asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_gid_t group)
++SYSCALL_DEFINE3(lchown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
+ {
+       long ret = sys_lchown(filename, low2highuid(user), low2highgid(group));
+       /* avoid REGPARM breakage on x86: */
+@@ -33,7 +33,7 @@
+       return ret;
+ }
+-asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group)
++SYSCALL_DEFINE3(fchown16, unsigned int, fd, old_uid_t, user, old_gid_t, group)
+ {
+       long ret = sys_fchown(fd, low2highuid(user), low2highgid(group));
+       /* avoid REGPARM breakage on x86: */
+@@ -41,7 +41,7 @@
+       return ret;
+ }
+-asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid)
++SYSCALL_DEFINE2(setregid16, old_gid_t, rgid, old_gid_t, egid)
+ {
+       long ret = sys_setregid(low2highgid(rgid), low2highgid(egid));
+       /* avoid REGPARM breakage on x86: */
+@@ -49,7 +49,7 @@
+       return ret;
+ }
+-asmlinkage long sys_setgid16(old_gid_t gid)
++SYSCALL_DEFINE1(setgid16, old_gid_t, gid)
+ {
+       long ret = sys_setgid(low2highgid(gid));
+       /* avoid REGPARM breakage on x86: */
+@@ -57,7 +57,7 @@
+       return ret;
+ }
+-asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid)
++SYSCALL_DEFINE2(setreuid16, old_uid_t, ruid, old_uid_t, euid)
+ {
+       long ret = sys_setreuid(low2highuid(ruid), low2highuid(euid));
+       /* avoid REGPARM breakage on x86: */
+@@ -65,7 +65,7 @@
+       return ret;
+ }
+-asmlinkage long sys_setuid16(old_uid_t uid)
++SYSCALL_DEFINE1(setuid16, old_uid_t, uid)
+ {
+       long ret = sys_setuid(low2highuid(uid));
+       /* avoid REGPARM breakage on x86: */
+@@ -73,7 +73,7 @@
+       return ret;
+ }
+-asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid)
++SYSCALL_DEFINE3(setresuid16, old_uid_t, ruid, old_uid_t, euid, old_uid_t, suid)
+ {
+       long ret = sys_setresuid(low2highuid(ruid), low2highuid(euid),
+                                low2highuid(suid));
+@@ -82,7 +82,7 @@
+       return ret;
+ }
+-asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid)
++SYSCALL_DEFINE3(getresuid16, old_uid_t __user *, ruid, old_uid_t __user *, euid, old_uid_t __user *, suid)
+ {
+       int retval;
+@@ -93,7 +93,7 @@
+       return retval;
+ }
+-asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid)
++SYSCALL_DEFINE3(setresgid16, old_gid_t, rgid, old_gid_t, egid, old_gid_t, sgid)
+ {
+       long ret = sys_setresgid(low2highgid(rgid), low2highgid(egid),
+                                low2highgid(sgid));
+@@ -102,7 +102,8 @@
+       return ret;
+ }
+-asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid)
++
++SYSCALL_DEFINE3(getresgid16, old_gid_t __user *, rgid, old_gid_t __user *, egid, old_gid_t __user *, sgid)
+ {
+       int retval;
+@@ -113,7 +114,7 @@
+       return retval;
+ }
+-asmlinkage long sys_setfsuid16(old_uid_t uid)
++SYSCALL_DEFINE1(setfsuid16, old_uid_t, uid)
+ {
+       long ret = sys_setfsuid(low2highuid(uid));
+       /* avoid REGPARM breakage on x86: */
+@@ -121,7 +122,7 @@
+       return ret;
+ }
+-asmlinkage long sys_setfsgid16(old_gid_t gid)
++SYSCALL_DEFINE1(setfsgid16, old_gid_t, gid)
+ {
+       long ret = sys_setfsgid(low2highgid(gid));
+       /* avoid REGPARM breakage on x86: */
+@@ -159,7 +160,7 @@
+       return 0;
+ }
+-asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist)
++SYSCALL_DEFINE2(getgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
+ {
+       int i = 0;
+@@ -183,7 +184,7 @@
+       return i;
+ }
+-asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist)
++SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
+ {
+       struct group_info *group_info;
+       int retval;
+@@ -208,22 +209,22 @@
+       return retval;
+ }
+-asmlinkage long sys_getuid16(void)
++SYSCALL_DEFINE0(getuid16)
+ {
+       return high2lowuid(current->uid);
+ }
+-asmlinkage long sys_geteuid16(void)
++SYSCALL_DEFINE0(geteuid16)
+ {
+       return high2lowuid(current->euid);
+ }
+-asmlinkage long sys_getgid16(void)
++SYSCALL_DEFINE0(getgid16)
+ {
+       return high2lowgid(current->gid);
+ }
+-asmlinkage long sys_getegid16(void)
++SYSCALL_DEFINE0(getegid16)
+ {
+       return high2lowgid(current->egid);
+ }
+--- kernel-maemo-2.6.28.test.orig/kernel/wait.c
++++ kernel-maemo-2.6.28.test/kernel/wait.c
+@@ -91,6 +91,15 @@
+ }
+ EXPORT_SYMBOL(prepare_to_wait_exclusive);
++/*
++ * finish_wait - clean up after waiting in a queue
++ * @q: waitqueue waited on
++ * @wait: wait descriptor
++ *
++ * Sets current thread back to running state and removes
++ * the wait descriptor from the given waitqueue if still
++ * queued.
++ */
+ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+ {
+       unsigned long flags;
+@@ -117,6 +126,39 @@
+ }
+ EXPORT_SYMBOL(finish_wait);
++/*
++ * abort_exclusive_wait - abort exclusive waiting in a queue
++ * @q: waitqueue waited on
++ * @wait: wait descriptor
++ * @state: runstate of the waiter to be woken
++ * @key: key to identify a wait bit queue or %NULL
++ *
++ * Sets current thread back to running state and removes
++ * the wait descriptor from the given waitqueue if still
++ * queued.
++ *
++ * Wakes up the next waiter if the caller is concurrently
++ * woken up through the queue.
++ *
++ * This prevents waiter starvation where an exclusive waiter
++ * aborts and is woken up concurrently and no one wakes up
++ * the next waiter.
++ */
++void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
++                      unsigned int mode, void *key)
++{
++      unsigned long flags;
++
++      __set_current_state(TASK_RUNNING);
++      spin_lock_irqsave(&q->lock, flags);
++      if (!list_empty(&wait->task_list))
++              list_del_init(&wait->task_list);
++      else if (waitqueue_active(q))
++              __wake_up_common(q, mode, 1, 0, key);
++      spin_unlock_irqrestore(&q->lock, flags);
++}
++EXPORT_SYMBOL(abort_exclusive_wait);
++
+ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
+ {
+       int ret = default_wake_function(wait, mode, sync, key);
+@@ -177,17 +219,20 @@
+ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
+                       int (*action)(void *), unsigned mode)
+ {
+-      int ret = 0;
+-
+       do {
++              int ret;
++
+               prepare_to_wait_exclusive(wq, &q->wait, mode);
+-              if (test_bit(q->key.bit_nr, q->key.flags)) {
+-                      if ((ret = (*action)(q->key.flags)))
+-                              break;
+-              }
++              if (!test_bit(q->key.bit_nr, q->key.flags))
++                      continue;
++              ret = action(q->key.flags);
++              if (!ret)
++                      continue;
++              abort_exclusive_wait(wq, &q->wait, mode, &q->key);
++              return ret;
+       } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
+       finish_wait(wq, &q->wait);
+-      return ret;
++      return 0;
+ }
+ EXPORT_SYMBOL(__wait_on_bit_lock);
+--- kernel-maemo-2.6.28.test.orig/lib/idr.c
++++ kernel-maemo-2.6.28.test/lib/idr.c
+@@ -121,7 +121,7 @@
+ {
+       while (idp->id_free_cnt < IDR_FREE_MAX) {
+               struct idr_layer *new;
+-              new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
++              new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+               if (new == NULL)
+                       return (0);
+               move_to_free_list(idp, new);
+@@ -623,16 +623,10 @@
+ }
+ EXPORT_SYMBOL(idr_replace);
+-static void idr_cache_ctor(void *idr_layer)
+-{
+-      memset(idr_layer, 0, sizeof(struct idr_layer));
+-}
+-
+ void __init idr_init_cache(void)
+ {
+       idr_layer_cache = kmem_cache_create("idr_layer_cache",
+-                              sizeof(struct idr_layer), 0, SLAB_PANIC,
+-                              idr_cache_ctor);
++                              sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
+ }
+ /**
+--- kernel-maemo-2.6.28.test.orig/mm/fadvise.c
++++ kernel-maemo-2.6.28.test/mm/fadvise.c
+@@ -24,7 +24,7 @@
+  * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
+  * deactivate the pages and clear PG_Referenced.
+  */
+-asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
++SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
+ {
+       struct file *file = fget(fd);
+       struct address_space *mapping;
+@@ -126,12 +126,26 @@
+       fput(file);
+       return ret;
+ }
++#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
++asmlinkage long SyS_fadvise64_64(long fd, loff_t offset, loff_t len, long advice)
++{
++      return SYSC_fadvise64_64((int) fd, offset, len, (int) advice);
++}
++SYSCALL_ALIAS(sys_fadvise64_64, SyS_fadvise64_64);
++#endif
+ #ifdef __ARCH_WANT_SYS_FADVISE64
+-asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice)
++SYSCALL_DEFINE(fadvise64)(int fd, loff_t offset, size_t len, int advice)
+ {
+       return sys_fadvise64_64(fd, offset, len, advice);
+ }
++#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
++asmlinkage long SyS_fadvise64(long fd, loff_t offset, long len, long advice)
++{
++      return SYSC_fadvise64((int) fd, offset, (size_t)len, (int)advice);
++}
++SYSCALL_ALIAS(sys_fadvise64, SyS_fadvise64);
++#endif
+ #endif
+--- kernel-maemo-2.6.28.test.orig/mm/filemap.c
++++ kernel-maemo-2.6.28.test/mm/filemap.c
+@@ -210,7 +210,7 @@
+       int ret;
+       struct writeback_control wbc = {
+               .sync_mode = sync_mode,
+-              .nr_to_write = mapping->nrpages * 2,
++              .nr_to_write = LONG_MAX,
+               .range_start = start,
+               .range_end = end,
+       };
+@@ -1317,7 +1317,8 @@
+                       goto out; /* skip atime */
+               size = i_size_read(inode);
+               if (pos < size) {
+-                      retval = filemap_write_and_wait(mapping);
++                      retval = filemap_write_and_wait_range(mapping, pos,
++                                      pos + iov_length(iov, nr_segs) - 1);
+                       if (!retval) {
+                               retval = mapping->a_ops->direct_IO(READ, iocb,
+                                                       iov, pos, nr_segs);
+@@ -1366,7 +1367,7 @@
+       return 0;
+ }
+-asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
++SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
+ {
+       ssize_t ret;
+       struct file *file;
+@@ -1385,6 +1386,13 @@
+       }
+       return ret;
+ }
++#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
++asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
++{
++      return SYSC_readahead((int) fd, offset, (size_t) count);
++}
++SYSCALL_ALIAS(sys_readahead, SyS_readahead);
++#endif
+ #ifdef CONFIG_MMU
+ /**
+@@ -2060,18 +2068,10 @@
+       if (count != ocount)
+               *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
+-      /*
+-       * Unmap all mmappings of the file up-front.
+-       *
+-       * This will cause any pte dirty bits to be propagated into the
+-       * pageframes for the subsequent filemap_write_and_wait().
+-       */
+       write_len = iov_length(iov, *nr_segs);
+       end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
+-      if (mapping_mapped(mapping))
+-              unmap_mapping_range(mapping, pos, write_len, 0);
+-      written = filemap_write_and_wait(mapping);
++      written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
+       if (written)
+               goto out;
+@@ -2140,19 +2140,24 @@
+  * Find or create a page at the given pagecache position. Return the locked
+  * page. This function is specifically for buffered writes.
+  */
+-struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
++struct page *grab_cache_page_write_begin(struct address_space *mapping,
++                                      pgoff_t index, unsigned flags)
+ {
+       int status;
+       struct page *page;
++      gfp_t gfp_notmask = 0;
++      if (flags & AOP_FLAG_NOFS)
++              gfp_notmask = __GFP_FS;
+ repeat:
+       page = find_lock_page(mapping, index);
+       if (likely(page))
+               return page;
+-      page = page_cache_alloc(mapping);
++      page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
+       if (!page)
+               return NULL;
+-      status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
++      status = add_to_page_cache_lru(page, mapping, index,
++                                              GFP_KERNEL & ~gfp_notmask);
+       if (unlikely(status)) {
+               page_cache_release(page);
+               if (status == -EEXIST)
+@@ -2161,7 +2166,7 @@
+       }
+       return page;
+ }
+-EXPORT_SYMBOL(__grab_cache_page);
++EXPORT_SYMBOL(grab_cache_page_write_begin);
+ static ssize_t generic_perform_write(struct file *file,
+                               struct iov_iter *i, loff_t pos)
+@@ -2286,7 +2291,8 @@
+        * the file data here, to try to honour O_DIRECT expectations.
+        */
+       if (unlikely(file->f_flags & O_DIRECT) && written)
+-              status = filemap_write_and_wait(mapping);
++              status = filemap_write_and_wait_range(mapping,
++                                      pos, pos + written - 1);
+       return written ? written : status;
+ }
+--- kernel-maemo-2.6.28.test.orig/mm/filemap_xip.c
++++ kernel-maemo-2.6.28.test/mm/filemap_xip.c
+@@ -89,8 +89,8 @@
+                       }
+               }
+               nr = nr - offset;
+-              if (nr > len)
+-                      nr = len;
++              if (nr > len - copied)
++                      nr = len - copied;
+               error = mapping->a_ops->get_xip_mem(mapping, index, 0,
+                                                       &xip_mem, &xip_pfn);
+--- kernel-maemo-2.6.28.test.orig/mm/fremap.c
++++ kernel-maemo-2.6.28.test/mm/fremap.c
+@@ -120,8 +120,8 @@
+  * and the vma's default protection is used. Arbitrary protections
+  * might be implemented in the future.
+  */
+-asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
+-      unsigned long prot, unsigned long pgoff, unsigned long flags)
++SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
++              unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
+ {
+       struct mm_struct *mm = current->mm;
+       struct address_space *mapping;
+--- kernel-maemo-2.6.28.test.orig/mm/madvise.c
++++ kernel-maemo-2.6.28.test/mm/madvise.c
+@@ -281,7 +281,7 @@
+  *  -EBADF  - map exists, but area maps something that isn't a file.
+  *  -EAGAIN - a kernel resource was temporarily unavailable.
+  */
+-asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior)
++SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
+ {
+       unsigned long end, tmp;
+       struct vm_area_struct * vma, *prev;
+--- kernel-maemo-2.6.28.test.orig/mm/memory.c
++++ kernel-maemo-2.6.28.test/mm/memory.c
+@@ -1882,7 +1882,7 @@
+        * Don't let another task, with possibly unlocked vma,
+        * keep the mlocked page.
+        */
+-      if (vma->vm_flags & VM_LOCKED) {
++      if ((vma->vm_flags & VM_LOCKED) && old_page) {
+               lock_page(old_page);    /* for LRU manipulation */
+               clear_page_mlock(old_page);
+               unlock_page(old_page);
+--- kernel-maemo-2.6.28.test.orig/mm/mempolicy.c
++++ kernel-maemo-2.6.28.test/mm/mempolicy.c
+@@ -1068,10 +1068,9 @@
+       return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
+ }
+-asmlinkage long sys_mbind(unsigned long start, unsigned long len,
+-                      unsigned long mode,
+-                      unsigned long __user *nmask, unsigned long maxnode,
+-                      unsigned flags)
++SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
++              unsigned long, mode, unsigned long __user *, nmask,
++              unsigned long, maxnode, unsigned, flags)
+ {
+       nodemask_t nodes;
+       int err;
+@@ -1091,8 +1090,8 @@
+ }
+ /* Set the process memory policy */
+-asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
+-              unsigned long maxnode)
++SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
++              unsigned long, maxnode)
+ {
+       int err;
+       nodemask_t nodes;
+@@ -1110,9 +1109,9 @@
+       return do_set_mempolicy(mode, flags, &nodes);
+ }
+-asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
+-              const unsigned long __user *old_nodes,
+-              const unsigned long __user *new_nodes)
++SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
++              const unsigned long __user *, old_nodes,
++              const unsigned long __user *, new_nodes)
+ {
+       struct mm_struct *mm;
+       struct task_struct *task;
+@@ -1180,10 +1179,9 @@
+ /* Retrieve NUMA policy */
+-asmlinkage long sys_get_mempolicy(int __user *policy,
+-                              unsigned long __user *nmask,
+-                              unsigned long maxnode,
+-                              unsigned long addr, unsigned long flags)
++SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
++              unsigned long __user *, nmask, unsigned long, maxnode,
++              unsigned long, addr, unsigned long, flags)
+ {
+       int err;
+       int uninitialized_var(pval);
+--- kernel-maemo-2.6.28.test.orig/mm/migrate.c
++++ kernel-maemo-2.6.28.test/mm/migrate.c
+@@ -1070,10 +1070,10 @@
+  * Move a list of pages in the address space of the currently executing
+  * process.
+  */
+-asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
+-                      const void __user * __user *pages,
+-                      const int __user *nodes,
+-                      int __user *status, int flags)
++SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
++              const void __user * __user *, pages,
++              const int __user *, nodes,
++              int __user *, status, int, flags)
+ {
+       struct task_struct *task;
+       struct mm_struct *mm;
+--- kernel-maemo-2.6.28.test.orig/mm/mincore.c
++++ kernel-maemo-2.6.28.test/mm/mincore.c
+@@ -177,8 +177,8 @@
+  *            mapped
+  *  -EAGAIN - A kernel resource was temporarily unavailable.
+  */
+-asmlinkage long sys_mincore(unsigned long start, size_t len,
+-      unsigned char __user * vec)
++SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
++              unsigned char __user *, vec)
+ {
+       long retval;
+       unsigned long pages;
+--- kernel-maemo-2.6.28.test.orig/mm/mlock.c
++++ kernel-maemo-2.6.28.test/mm/mlock.c
+@@ -293,14 +293,10 @@
+  *
+  * return number of pages [> 0] to be removed from locked_vm on success
+  * of "special" vmas.
+- *
+- * return negative error if vma spanning @start-@range disappears while
+- * mmap semaphore is dropped.  Unlikely?
+  */
+ long mlock_vma_pages_range(struct vm_area_struct *vma,
+                       unsigned long start, unsigned long end)
+ {
+-      struct mm_struct *mm = vma->vm_mm;
+       int nr_pages = (end - start) / PAGE_SIZE;
+       BUG_ON(!(vma->vm_flags & VM_LOCKED));
+@@ -313,20 +309,11 @@
+       if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
+                       is_vm_hugetlb_page(vma) ||
+                       vma == get_gate_vma(current))) {
+-              long error;
+-              downgrade_write(&mm->mmap_sem);
+-
+-              error = __mlock_vma_pages_range(vma, start, end, 1);
+-              up_read(&mm->mmap_sem);
+-              /* vma can change or disappear */
+-              down_write(&mm->mmap_sem);
+-              vma = find_vma(mm, start);
+-              /* non-NULL vma must contain @start, but need to check @end */
+-              if (!vma ||  end > vma->vm_end)
+-                      return -ENOMEM;
++              __mlock_vma_pages_range(vma, start, end, 1);
+-              return 0;       /* hide other errors from mmap(), et al */
++              /* Hide errors from mmap() and other callers */
++              return 0;
+       }
+       /*
+@@ -437,41 +424,14 @@
+       vma->vm_flags = newflags;
+       if (lock) {
+-              /*
+-               * mmap_sem is currently held for write.  Downgrade the write
+-               * lock to a read lock so that other faults, mmap scans, ...
+-               * while we fault in all pages.
+-               */
+-              downgrade_write(&mm->mmap_sem);
+-
+               ret = __mlock_vma_pages_range(vma, start, end, 1);
+-              /*
+-               * Need to reacquire mmap sem in write mode, as our callers
+-               * expect this.  We have no support for atomically upgrading
+-               * a sem to write, so we need to check for ranges while sem
+-               * is unlocked.
+-               */
+-              up_read(&mm->mmap_sem);
+-              /* vma can change or disappear */
+-              down_write(&mm->mmap_sem);
+-              *prev = find_vma(mm, start);
+-              /* non-NULL *prev must contain @start, but need to check @end */
+-              if (!(*prev) || end > (*prev)->vm_end)
+-                      ret = -ENOMEM;
+-              else if (ret > 0) {
++              if (ret > 0) {
+                       mm->locked_vm -= ret;
+                       ret = 0;
+               } else
+                       ret = __mlock_posix_error_return(ret); /* translate if needed */
+       } else {
+-              /*
+-               * TODO:  for unlocking, pages will already be resident, so
+-               * we don't need to wait for allocations/reclaim/pagein, ...
+-               * However, unlocking a very large region can still take a
+-               * while.  Should we downgrade the semaphore for both lock
+-               * AND unlock ?
+-               */
+               __mlock_vma_pages_range(vma, start, end, 0);
+       }
+@@ -529,7 +489,7 @@
+       return error;
+ }
+-asmlinkage long sys_mlock(unsigned long start, size_t len)
++SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
+ {
+       unsigned long locked;
+       unsigned long lock_limit;
+@@ -557,7 +517,7 @@
+       return error;
+ }
+-asmlinkage long sys_munlock(unsigned long start, size_t len)
++SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
+ {
+       int ret;
+@@ -594,7 +554,7 @@
+       return 0;
+ }
+-asmlinkage long sys_mlockall(int flags)
++SYSCALL_DEFINE1(mlockall, int, flags)
+ {
+       unsigned long lock_limit;
+       int ret = -EINVAL;
+@@ -622,7 +582,7 @@
+       return ret;
+ }
+-asmlinkage long sys_munlockall(void)
++SYSCALL_DEFINE0(munlockall)
+ {
+       int ret;
+--- kernel-maemo-2.6.28.test.orig/mm/mmap.c
++++ kernel-maemo-2.6.28.test/mm/mmap.c
+@@ -245,7 +245,7 @@
+       return next;
+ }
+-asmlinkage unsigned long sys_brk(unsigned long brk)
++SYSCALL_DEFINE1(brk, unsigned long, brk)
+ {
+       unsigned long rlim, retval;
+       unsigned long newbrk, oldbrk;
+@@ -1095,6 +1095,7 @@
+ {
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma, *prev;
++      struct vm_area_struct *merged_vma;
+       int correct_wcount = 0;
+       int error;
+       struct rb_node **rb_link, *rb_parent;
+@@ -1207,13 +1208,17 @@
+       if (vma_wants_writenotify(vma))
+               vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
+-      if (file && vma_merge(mm, prev, addr, vma->vm_end,
+-                      vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
++      merged_vma = NULL;
++      if (file)
++              merged_vma = vma_merge(mm, prev, addr, vma->vm_end,
++                      vma->vm_flags, NULL, file, pgoff, vma_policy(vma));
++      if (merged_vma) {
+               mpol_put(vma_policy(vma));
+               kmem_cache_free(vm_area_cachep, vma);
+               fput(file);
+               if (vm_flags & VM_EXECUTABLE)
+                       removed_exe_file_vma(mm);
++              vma = merged_vma;
+       } else {
+               vma_link(mm, vma, prev, rb_link, rb_parent);
+               file = vma->vm_file;
+@@ -1575,7 +1580,7 @@
+        * Overcommit..  This must be the final test, as it will
+        * update security statistics.
+        */
+-      if (security_vm_enough_memory(grow))
++      if (security_vm_enough_memory_mm(mm, grow))
+               return -ENOMEM;
+       /* Ok, everything looks good - let it rip */
+@@ -1949,7 +1954,7 @@
+ EXPORT_SYMBOL(do_munmap);
+-asmlinkage long sys_munmap(unsigned long addr, size_t len)
++SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+ {
+       int ret;
+       struct mm_struct *mm = current->mm;
+@@ -2088,7 +2093,6 @@
+       unsigned long end;
+       /* mm's last user has gone, and its about to be pulled down */
+-      arch_exit_mmap(mm);
+       mmu_notifier_release(mm);
+       if (mm->locked_vm) {
+@@ -2099,7 +2103,13 @@
+                       vma = vma->vm_next;
+               }
+       }
++
++      arch_exit_mmap(mm);
++
+       vma = mm->mmap;
++      if (!vma)       /* Can happen if dup_mmap() received an OOM */
++              return;
++
+       lru_add_drain();
+       flush_cache_mm(mm);
+       tlb = tlb_gather_mmu(mm, 1);
+--- kernel-maemo-2.6.28.test.orig/mm/mprotect.c
++++ kernel-maemo-2.6.28.test/mm/mprotect.c
+@@ -219,8 +219,8 @@
+       return error;
+ }
+-asmlinkage long
+-sys_mprotect(unsigned long start, size_t len, unsigned long prot)
++SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
++              unsigned long, prot)
+ {
+       unsigned long vm_flags, nstart, end, tmp, reqprot;
+       struct vm_area_struct *vma, *prev;
+--- kernel-maemo-2.6.28.test.orig/mm/mremap.c
++++ kernel-maemo-2.6.28.test/mm/mremap.c
+@@ -420,9 +420,9 @@
+       return ret;
+ }
+-asmlinkage unsigned long sys_mremap(unsigned long addr,
+-      unsigned long old_len, unsigned long new_len,
+-      unsigned long flags, unsigned long new_addr)
++SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
++              unsigned long, new_len, unsigned long, flags,
++              unsigned long, new_addr)
+ {
+       unsigned long ret;
+--- kernel-maemo-2.6.28.test.orig/mm/msync.c
++++ kernel-maemo-2.6.28.test/mm/msync.c
+@@ -28,7 +28,7 @@
+  * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
+  * applications.
+  */
+-asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
++SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
+ {
+       unsigned long end;
+       struct mm_struct *mm = current->mm;
+--- kernel-maemo-2.6.28.test.orig/mm/nommu.c
++++ kernel-maemo-2.6.28.test/mm/nommu.c
+@@ -377,7 +377,7 @@
+  *  to a regular file.  in this case, the unmapping will need
+  *  to invoke file system routines that need the global lock.
+  */
+-asmlinkage unsigned long sys_brk(unsigned long brk)
++SYSCALL_DEFINE1(brk, unsigned long, brk)
+ {
+       struct mm_struct *mm = current->mm;
+@@ -1192,7 +1192,7 @@
+ }
+ EXPORT_SYMBOL(do_munmap);
+-asmlinkage long sys_munmap(unsigned long addr, size_t len)
++SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+ {
+       int ret;
+       struct mm_struct *mm = current->mm;
+@@ -1283,9 +1283,9 @@
+ }
+ EXPORT_SYMBOL(do_mremap);
+-asmlinkage unsigned long sys_mremap(unsigned long addr,
+-      unsigned long old_len, unsigned long new_len,
+-      unsigned long flags, unsigned long new_addr)
++SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
++              unsigned long, new_len, unsigned long, flags,
++              unsigned long, new_addr)
+ {
+       unsigned long ret;
+--- kernel-maemo-2.6.28.test.orig/mm/page-writeback.c
++++ kernel-maemo-2.6.28.test/mm/page-writeback.c
+@@ -980,9 +980,11 @@
+       int done = 0;
+       struct pagevec pvec;
+       int nr_pages;
++      pgoff_t uninitialized_var(writeback_index);
+       pgoff_t index;
+       pgoff_t end;            /* Inclusive */
+-      int scanned = 0;
++      pgoff_t done_index;
++      int cycled;
+       int range_whole = 0;
+       long nr_to_write = wbc->nr_to_write;
+@@ -993,83 +995,146 @@
+       pagevec_init(&pvec, 0);
+       if (wbc->range_cyclic) {
+-              index = mapping->writeback_index; /* Start from prev offset */
++              writeback_index = mapping->writeback_index; /* prev offset */
++              index = writeback_index;
++              if (index == 0)
++                      cycled = 1;
++              else
++                      cycled = 0;
+               end = -1;
+       } else {
+               index = wbc->range_start >> PAGE_CACHE_SHIFT;
+               end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+                       range_whole = 1;
+-              scanned = 1;
++              cycled = 1; /* ignore range_cyclic tests */
+       }
+ retry:
+-      while (!done && (index <= end) &&
+-             (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+-                                            PAGECACHE_TAG_DIRTY,
+-                                            min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+-              unsigned i;
++      done_index = index;
++      while (!done && (index <= end)) {
++              int i;
++
++              nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
++                            PAGECACHE_TAG_DIRTY,
++                            min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
++              if (nr_pages == 0)
++                      break;
+-              scanned = 1;
+               for (i = 0; i < nr_pages; i++) {
+                       struct page *page = pvec.pages[i];
+                       /*
+-                       * At this point we hold neither mapping->tree_lock nor
+-                       * lock on the page itself: the page may be truncated or
+-                       * invalidated (changing page->mapping to NULL), or even
+-                       * swizzled back from swapper_space to tmpfs file
+-                       * mapping
++                       * At this point, the page may be truncated or
++                       * invalidated (changing page->mapping to NULL), or
++                       * even swizzled back from swapper_space to tmpfs file
++                       * mapping. However, page->index will not change
++                       * because we have a reference on the page.
+                        */
++                      if (page->index > end) {
++                              /*
++                               * can't be range_cyclic (1st pass) because
++                               * end == -1 in that case.
++                               */
++                              done = 1;
++                              break;
++                      }
++
++                      done_index = page->index + 1;
++
+                       lock_page(page);
++                      /*
++                       * Page truncated or invalidated. We can freely skip it
++                       * then, even for data integrity operations: the page
++                       * has disappeared concurrently, so there could be no
++                       * real expectation of this data integrity operation
++                       * even if there is now a new, dirty page at the same
++                       * pagecache address.
++                       */
+                       if (unlikely(page->mapping != mapping)) {
++continue_unlock:
+                               unlock_page(page);
+                               continue;
+                       }
+-                      if (!wbc->range_cyclic && page->index > end) {
+-                              done = 1;
+-                              unlock_page(page);
+-                              continue;
++                      if (!PageDirty(page)) {
++                              /* someone wrote it for us */
++                              goto continue_unlock;
+                       }
+-                      if (wbc->sync_mode != WB_SYNC_NONE)
+-                              wait_on_page_writeback(page);
+-
+-                      if (PageWriteback(page) ||
+-                          !clear_page_dirty_for_io(page)) {
+-                              unlock_page(page);
+-                              continue;
++                      if (PageWriteback(page)) {
++                              if (wbc->sync_mode != WB_SYNC_NONE)
++                                      wait_on_page_writeback(page);
++                              else
++                                      goto continue_unlock;
+                       }
+-                      ret = (*writepage)(page, wbc, data);
++                      BUG_ON(PageWriteback(page));
++                      if (!clear_page_dirty_for_io(page))
++                              goto continue_unlock;
+-                      if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
+-                              unlock_page(page);
+-                              ret = 0;
++                      ret = (*writepage)(page, wbc, data);
++                      if (unlikely(ret)) {
++                              if (ret == AOP_WRITEPAGE_ACTIVATE) {
++                                      unlock_page(page);
++                                      ret = 0;
++                              } else {
++                                      /*
++                                       * done_index is set past this page,
++                                       * so media errors will not choke
++                                       * background writeout for the entire
++                                       * file. This has consequences for
++                                       * range_cyclic semantics (ie. it may
++                                       * not be suitable for data integrity
++                                       * writeout).
++                                       */
++                                      done = 1;
++                                      break;
++                              }
++                      }
++
++                      if (nr_to_write > 0) {
++                              nr_to_write--;
++                              if (nr_to_write == 0 &&
++                                  wbc->sync_mode == WB_SYNC_NONE) {
++                                      /*
++                                       * We stop writing back only if we are
++                                       * not doing integrity sync. In case of
++                                       * integrity sync we have to keep going
++                                       * because someone may be concurrently
++                                       * dirtying pages, and we might have
++                                       * synced a lot of newly appeared dirty
++                                       * pages, but have not synced all of the
++                                       * old dirty pages.
++                                       */
++                                      done = 1;
++                                      break;
++                              }
+                       }
+-                      if (ret || (--nr_to_write <= 0))
+-                              done = 1;
++
+                       if (wbc->nonblocking && bdi_write_congested(bdi)) {
+                               wbc->encountered_congestion = 1;
+                               done = 1;
++                              break;
+                       }
+               }
+               pagevec_release(&pvec);
+               cond_resched();
+       }
+-      if (!scanned && !done) {
++      if (!cycled && !done) {
+               /*
++               * range_cyclic:
+                * We hit the last page and there is more work to be done: wrap
+                * back to the start of the file
+                */
+-              scanned = 1;
++              cycled = 1;
+               index = 0;
++              end = writeback_index - 1;
+               goto retry;
+       }
+       if (!wbc->no_nrwrite_index_update) {
+               if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
+-                      mapping->writeback_index = index;
++                      mapping->writeback_index = done_index;
+               wbc->nr_to_write = nr_to_write;
+       }
+--- kernel-maemo-2.6.28.test.orig/mm/page_alloc.c
++++ kernel-maemo-2.6.28.test/mm/page_alloc.c
+@@ -2974,7 +2974,7 @@
+  * was used and there are no special requirements, this is a convenient
+  * alternative
+  */
+-int __meminit early_pfn_to_nid(unsigned long pfn)
++int __meminit __early_pfn_to_nid(unsigned long pfn)
+ {
+       int i;
+@@ -2985,10 +2985,33 @@
+               if (start_pfn <= pfn && pfn < end_pfn)
+                       return early_node_map[i].nid;
+       }
++      /* This is a memory hole */
++      return -1;
++}
++#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
++
++int __meminit early_pfn_to_nid(unsigned long pfn)
++{
++      int nid;
++      nid = __early_pfn_to_nid(pfn);
++      if (nid >= 0)
++              return nid;
++      /* just returns 0 */
+       return 0;
+ }
+-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
++
++#ifdef CONFIG_NODES_SPAN_OTHER_NODES
++bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
++{
++      int nid;
++
++      nid = __early_pfn_to_nid(pfn);
++      if (nid >= 0 && nid != node)
++              return false;
++      return true;
++}
++#endif
+ /* Basic iterator support to walk early_node_map[] */
+ #define for_each_active_range_index_in_nid(i, nid) \
+--- kernel-maemo-2.6.28.test.orig/mm/swapfile.c
++++ kernel-maemo-2.6.28.test/mm/swapfile.c
+@@ -1352,7 +1352,7 @@
+ }
+ #endif
+-asmlinkage long sys_swapoff(const char __user * specialfile)
++SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ {
+       struct swap_info_struct * p = NULL;
+       unsigned short *swap_map;
+@@ -1597,7 +1597,7 @@
+  *
+  * The swapon system call
+  */
+-asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
++SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+ {
+       struct swap_info_struct * p;
+       char *name = NULL;
+--- kernel-maemo-2.6.28.test.orig/mm/vmalloc.c
++++ kernel-maemo-2.6.28.test/mm/vmalloc.c
+@@ -23,6 +23,7 @@
+ #include <linux/rbtree.h>
+ #include <linux/radix-tree.h>
+ #include <linux/rcupdate.h>
++#include <linux/bootmem.h>
+ #include <asm/atomic.h>
+ #include <asm/uaccess.h>
+@@ -151,11 +152,12 @@
+  *
+  * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
+  */
+-static int vmap_page_range(unsigned long addr, unsigned long end,
++static int vmap_page_range(unsigned long start, unsigned long end,
+                               pgprot_t prot, struct page **pages)
+ {
+       pgd_t *pgd;
+       unsigned long next;
++      unsigned long addr = start;
+       int err = 0;
+       int nr = 0;
+@@ -167,7 +169,7 @@
+               if (err)
+                       break;
+       } while (pgd++, addr = next, addr != end);
+-      flush_cache_vmap(addr, end);
++      flush_cache_vmap(start, end);
+       if (unlikely(err))
+               return err;
+@@ -321,6 +323,7 @@
+       unsigned long addr;
+       int purged = 0;
++      BUG_ON(!size);
+       BUG_ON(size & ~PAGE_MASK);
+       va = kmalloc_node(sizeof(struct vmap_area),
+@@ -332,6 +335,9 @@
+       addr = ALIGN(vstart, align);
+       spin_lock(&vmap_area_lock);
++      if (addr + size - 1 < addr)
++              goto overflow;
++
+       /* XXX: could have a last_hole cache */
+       n = vmap_area_root.rb_node;
+       if (n) {
+@@ -363,6 +369,8 @@
+               while (addr + size > first->va_start && addr + size <= vend) {
+                       addr = ALIGN(first->va_end + PAGE_SIZE, align);
++                      if (addr + size - 1 < addr)
++                              goto overflow;
+                       n = rb_next(&first->rb_node);
+                       if (n)
+@@ -373,6 +381,7 @@
+       }
+ found:
+       if (addr + size > vend) {
++overflow:
+               spin_unlock(&vmap_area_lock);
+               if (!purged) {
+                       purge_vmap_area_lazy();
+@@ -474,6 +483,7 @@
+       static DEFINE_SPINLOCK(purge_lock);
+       LIST_HEAD(valist);
+       struct vmap_area *va;
++      struct vmap_area *n_va;
+       int nr = 0;
+       /*
+@@ -513,7 +523,7 @@
+       if (nr) {
+               spin_lock(&vmap_area_lock);
+-              list_for_each_entry(va, &valist, purge_list)
++              list_for_each_entry_safe(va, n_va, &valist, purge_list)
+                       __free_vmap_area(va);
+               spin_unlock(&vmap_area_lock);
+       }
+@@ -959,6 +969,8 @@
+ void __init vmalloc_init(void)
+ {
++      struct vmap_area *va;
++      struct vm_struct *tmp;
+       int i;
+       for_each_possible_cpu(i) {
+@@ -971,12 +983,22 @@
+               vbq->nr_dirty = 0;
+       }
++      /* Import existing vmlist entries. */
++      for (tmp = vmlist; tmp; tmp = tmp->next) {
++              va = alloc_bootmem(sizeof(struct vmap_area));
++              va->flags = tmp->flags | VM_VM_AREA;
++              va->va_start = (unsigned long)tmp->addr;
++              va->va_end = va->va_start + tmp->size;
++              __insert_vmap_area(va);
++      }
+       vmap_initialized = true;
+ }
+ void unmap_kernel_range(unsigned long addr, unsigned long size)
+ {
+       unsigned long end = addr + size;
++
++      flush_cache_vunmap(addr, end);
+       vunmap_page_range(addr, end);
+       flush_tlb_kernel_range(addr, end);
+ }
+--- kernel-maemo-2.6.28.test.orig/net/bridge/br_if.c
++++ kernel-maemo-2.6.28.test/net/bridge/br_if.c
+@@ -426,7 +426,6 @@
+ err1:
+       kobject_del(&p->kobj);
+ err0:
+-      kobject_put(&p->kobj);
+       dev_set_promiscuity(dev, -1);
+ put_back:
+       dev_put(dev);
+--- kernel-maemo-2.6.28.test.orig/net/bridge/netfilter/ebtables.c
++++ kernel-maemo-2.6.28.test/net/bridge/netfilter/ebtables.c
+@@ -80,7 +80,7 @@
+ {
+       par->match     = m->u.match;
+       par->matchinfo = m->data;
+-      return m->u.match->match(skb, par);
++      return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
+ }
+ static inline int ebt_dev_check(char *entry, const struct net_device *device)
+--- kernel-maemo-2.6.28.test.orig/net/core/net_namespace.c
++++ kernel-maemo-2.6.28.test/net/core/net_namespace.c
+@@ -342,8 +342,8 @@
+       rv = register_pernet_operations(first_device, ops);
+       if (rv < 0)
+               ida_remove(&net_generic_ids, *id);
+-      mutex_unlock(&net_mutex);
+ out:
++      mutex_unlock(&net_mutex);
+       return rv;
+ }
+ EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);
+--- kernel-maemo-2.6.28.test.orig/net/core/skbuff.c
++++ kernel-maemo-2.6.28.test/net/core/skbuff.c
+@@ -73,17 +73,13 @@
+ static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
+                                 struct pipe_buffer *buf)
+ {
+-      struct sk_buff *skb = (struct sk_buff *) buf->private;
+-
+-      kfree_skb(skb);
++      put_page(buf->page);
+ }
+ static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
+                               struct pipe_buffer *buf)
+ {
+-      struct sk_buff *skb = (struct sk_buff *) buf->private;
+-
+-      skb_get(skb);
++      get_page(buf->page);
+ }
+ static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
+@@ -147,14 +143,6 @@
+       BUG();
+ }
+-void skb_truesize_bug(struct sk_buff *skb)
+-{
+-      WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) "
+-             "len=%u, sizeof(sk_buff)=%Zd\n",
+-             skb->truesize, skb->len, sizeof(struct sk_buff));
+-}
+-EXPORT_SYMBOL(skb_truesize_bug);
+-
+ /*    Allocate a new skbuff. We do this ourselves so we can fill in a few
+  *    'private' fields and also do memory statistics to find all the
+  *    [BEEP] leaks.
+@@ -1333,9 +1321,19 @@
+  */
+ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
+ {
+-      struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private;
++      put_page(spd->pages[i]);
++}
+-      kfree_skb(skb);
++static inline struct page *linear_to_page(struct page *page, unsigned int len,
++                                        unsigned int offset)
++{
++      struct page *p = alloc_pages(GFP_KERNEL, 0);
++
++      if (!p)
++              return NULL;
++      memcpy(page_address(p) + offset, page_address(page) + offset, len);
++
++      return p;
+ }
+ /*
+@@ -1343,16 +1341,23 @@
+  */
+ static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
+                               unsigned int len, unsigned int offset,
+-                              struct sk_buff *skb)
++                              struct sk_buff *skb, int linear)
+ {
+       if (unlikely(spd->nr_pages == PIPE_BUFFERS))
+               return 1;
++      if (linear) {
++              page = linear_to_page(page, len, offset);
++              if (!page)
++                      return 1;
++      } else
++              get_page(page);
++
+       spd->pages[spd->nr_pages] = page;
+       spd->partial[spd->nr_pages].len = len;
+       spd->partial[spd->nr_pages].offset = offset;
+-      spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
+       spd->nr_pages++;
++
+       return 0;
+ }
+@@ -1368,7 +1373,7 @@
+ static inline int __splice_segment(struct page *page, unsigned int poff,
+                                  unsigned int plen, unsigned int *off,
+                                  unsigned int *len, struct sk_buff *skb,
+-                                 struct splice_pipe_desc *spd)
++                                 struct splice_pipe_desc *spd, int linear)
+ {
+       if (!*len)
+               return 1;
+@@ -1391,7 +1396,7 @@
+               /* the linear region may spread across several pages  */
+               flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
+-              if (spd_fill_page(spd, page, flen, poff, skb))
++              if (spd_fill_page(spd, page, flen, poff, skb, linear))
+                       return 1;
+               __segment_seek(&page, &poff, &plen, flen);
+@@ -1418,7 +1423,7 @@
+       if (__splice_segment(virt_to_page(skb->data),
+                            (unsigned long) skb->data & (PAGE_SIZE - 1),
+                            skb_headlen(skb),
+-                           offset, len, skb, spd))
++                           offset, len, skb, spd, 1))
+               return 1;
+       /*
+@@ -1428,7 +1433,7 @@
+               const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
+               if (__splice_segment(f->page, f->page_offset, f->size,
+-                                   offset, len, skb, spd))
++                                   offset, len, skb, spd, 0))
+                       return 1;
+       }
+@@ -1441,7 +1446,7 @@
+  * the frag list, if such a thing exists. We'd probably need to recurse to
+  * handle that cleanly.
+  */
+-int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
++int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+                   struct pipe_inode_info *pipe, unsigned int tlen,
+                   unsigned int flags)
+ {
+@@ -1454,16 +1459,6 @@
+               .ops = &sock_pipe_buf_ops,
+               .spd_release = sock_spd_release,
+       };
+-      struct sk_buff *skb;
+-
+-      /*
+-       * I'd love to avoid the clone here, but tcp_read_sock()
+-       * ignores reference counts and unconditonally kills the sk_buff
+-       * on return from the actor.
+-       */
+-      skb = skb_clone(__skb, GFP_KERNEL);
+-      if (unlikely(!skb))
+-              return -ENOMEM;
+       /*
+        * __skb_splice_bits() only fails if the output has no room left,
+@@ -1487,15 +1482,9 @@
+       }
+ done:
+-      /*
+-       * drop our reference to the clone, the pipe consumption will
+-       * drop the rest.
+-       */
+-      kfree_skb(skb);
+-
+       if (spd.nr_pages) {
++              struct sock *sk = skb->sk;
+               int ret;
+-              struct sock *sk = __skb->sk;
+               /*
+                * Drop the socket lock, otherwise we have reverse
+@@ -2072,10 +2061,10 @@
+               return 0;
+ next_skb:
+-      block_limit = skb_headlen(st->cur_skb);
++      block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
+       if (abs_offset < block_limit) {
+-              *data = st->cur_skb->data + abs_offset;
++              *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
+               return block_limit - abs_offset;
+       }
+@@ -2110,13 +2099,14 @@
+               st->frag_data = NULL;
+       }
+-      if (st->cur_skb->next) {
+-              st->cur_skb = st->cur_skb->next;
++      if (st->root_skb == st->cur_skb &&
++          skb_shinfo(st->root_skb)->frag_list) {
++              st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
+               st->frag_idx = 0;
+               goto next_skb;
+-      } else if (st->root_skb == st->cur_skb &&
+-                 skb_shinfo(st->root_skb)->frag_list) {
+-              st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
++      } else if (st->cur_skb->next) {
++              st->cur_skb = st->cur_skb->next;
++              st->frag_idx = 0;
+               goto next_skb;
+       }
+--- kernel-maemo-2.6.28.test.orig/net/core/sock.c
++++ kernel-maemo-2.6.28.test/net/core/sock.c
+@@ -696,6 +696,8 @@
+       if (len < 0)
+               return -EINVAL;
++      memset(&v, 0, sizeof(v));
++
+       switch(optname) {
+       case SO_DEBUG:
+               v.val = sock_flag(sk, SOCK_DBG);
+@@ -1150,7 +1152,6 @@
+ {
+       struct sock *sk = skb->sk;
+-      skb_truesize_check(skb);
+       atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+       sk_mem_uncharge(skb->sk, skb->truesize);
+ }
+--- kernel-maemo-2.6.28.test.orig/net/ipv4/ipconfig.c
++++ kernel-maemo-2.6.28.test/net/ipv4/ipconfig.c
+@@ -1272,6 +1272,9 @@
+ static int __init ip_auto_config(void)
+ {
+       __be32 addr;
++#ifdef IPCONFIG_DYNAMIC
++      int retries = CONF_OPEN_RETRIES;
++#endif
+ #ifdef CONFIG_PROC_FS
+       proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
+@@ -1308,9 +1311,6 @@
+ #endif
+           ic_first_dev->next) {
+ #ifdef IPCONFIG_DYNAMIC
+-
+-              int retries = CONF_OPEN_RETRIES;
+-
+               if (ic_dynamic() < 0) {
+                       ic_close_devs();
+--- kernel-maemo-2.6.28.test.orig/net/ipv4/netfilter/arp_tables.c
++++ kernel-maemo-2.6.28.test/net/ipv4/netfilter/arp_tables.c
+@@ -374,7 +374,9 @@
+                           && unconditional(&e->arp)) || visited) {
+                               unsigned int oldpos, size;
+-                              if (t->verdict < -NF_MAX_VERDICT - 1) {
++                              if ((strcmp(t->target.u.user.name,
++                                          ARPT_STANDARD_TARGET) == 0) &&
++                                  t->verdict < -NF_MAX_VERDICT - 1) {
+                                       duprintf("mark_source_chains: bad "
+                                               "negative verdict (%i)\n",
+                                                               t->verdict);
+--- kernel-maemo-2.6.28.test.orig/net/ipv4/netfilter/ip_tables.c
++++ kernel-maemo-2.6.28.test/net/ipv4/netfilter/ip_tables.c
+@@ -500,7 +500,9 @@
+                           && unconditional(&e->ip)) || visited) {
+                               unsigned int oldpos, size;
+-                              if (t->verdict < -NF_MAX_VERDICT - 1) {
++                              if ((strcmp(t->target.u.user.name,
++                                          IPT_STANDARD_TARGET) == 0) &&
++                                  t->verdict < -NF_MAX_VERDICT - 1) {
+                                       duprintf("mark_source_chains: bad "
+                                               "negative verdict (%i)\n",
+                                                               t->verdict);
+--- kernel-maemo-2.6.28.test.orig/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
++++ kernel-maemo-2.6.28.test/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+@@ -20,7 +20,7 @@
+ #include <net/netfilter/nf_conntrack_core.h>
+ #include <net/netfilter/nf_log.h>
+-static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ;
++static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
+ static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
+                             struct nf_conntrack_tuple *tuple)
+--- kernel-maemo-2.6.28.test.orig/net/ipv4/tcp.c
++++ kernel-maemo-2.6.28.test/net/ipv4/tcp.c
+@@ -520,8 +520,13 @@
+                               unsigned int offset, size_t len)
+ {
+       struct tcp_splice_state *tss = rd_desc->arg.data;
++      int ret;
+-      return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
++      ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
++                            tss->flags);
++      if (ret > 0)
++              rd_desc->count -= ret;
++      return ret;
+ }
+ static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
+@@ -529,6 +534,7 @@
+       /* Store TCP splice context information in read_descriptor_t. */
+       read_descriptor_t rd_desc = {
+               .arg.data = tss,
++              .count    = tss->len,
+       };
+       return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
+@@ -578,10 +584,6 @@
+               else if (!ret) {
+                       if (spliced)
+                               break;
+-                      if (flags & SPLICE_F_NONBLOCK) {
+-                              ret = -EAGAIN;
+-                              break;
+-                      }
+                       if (sock_flag(sk, SOCK_DONE))
+                               break;
+                       if (sk->sk_err) {
+@@ -599,6 +601,10 @@
+                                       ret = -ENOTCONN;
+                               break;
+                       }
++                      if (flags & SPLICE_F_NONBLOCK) {
++                              ret = -EAGAIN;
++                              break;
++                      }
+                       if (!timeo) {
+                               ret = -EAGAIN;
+                               break;
+@@ -613,11 +619,13 @@
+               tss.len -= ret;
+               spliced += ret;
++              if (!timeo)
++                      break;
+               release_sock(sk);
+               lock_sock(sk);
+               if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
+-                  (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
++                  (sk->sk_shutdown & RCV_SHUTDOWN) ||
+                   signal_pending(current))
+                       break;
+       }
+--- kernel-maemo-2.6.28.test.orig/net/ipv4/udp.c
++++ kernel-maemo-2.6.28.test/net/ipv4/udp.c
+@@ -942,9 +942,11 @@
+       if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
+               /* Note that an ENOMEM error is charged twice */
+-              if (rc == -ENOMEM)
++              if (rc == -ENOMEM) {
+                       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+                                        is_udplite);
++                      atomic_inc(&sk->sk_drops);
++              }
+               goto drop;
+       }
+@@ -1155,7 +1157,7 @@
+                  int proto)
+ {
+       struct sock *sk;
+-      struct udphdr *uh = udp_hdr(skb);
++      struct udphdr *uh;
+       unsigned short ulen;
+       struct rtable *rt = (struct rtable*)skb->dst;
+       __be32 saddr = ip_hdr(skb)->saddr;
+@@ -1168,6 +1170,7 @@
+       if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+               goto drop;              /* No space for header. */
++      uh   = udp_hdr(skb);
+       ulen = ntohs(uh->len);
+       if (ulen > skb->len)
+               goto short_packet;
+--- kernel-maemo-2.6.28.test.orig/net/ipv6/inet6_hashtables.c
++++ kernel-maemo-2.6.28.test/net/ipv6/inet6_hashtables.c
+@@ -210,11 +210,11 @@
+       if (twp != NULL) {
+               *twp = tw;
+-              NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
++              NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+       } else if (tw != NULL) {
+               /* Silly. Should hash-dance instead... */
+               inet_twsk_deschedule(tw, death_row);
+-              NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
++              NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+               inet_twsk_put(tw);
+       }
+--- kernel-maemo-2.6.28.test.orig/net/ipv6/ip6_fib.c
++++ kernel-maemo-2.6.28.test/net/ipv6/ip6_fib.c
+@@ -298,6 +298,10 @@
+       struct fib6_walker_t *w = (void*)cb->args[2];
+       if (w) {
++              if (cb->args[4]) {
++                      cb->args[4] = 0;
++                      fib6_walker_unlink(w);
++              }
+               cb->args[2] = 0;
+               kfree(w);
+       }
+@@ -330,15 +334,12 @@
+               read_lock_bh(&table->tb6_lock);
+               res = fib6_walk_continue(w);
+               read_unlock_bh(&table->tb6_lock);
+-              if (res != 0) {
+-                      if (res < 0)
+-                              fib6_walker_unlink(w);
+-                      goto end;
++              if (res <= 0) {
++                      fib6_walker_unlink(w);
++                      cb->args[4] = 0;
+               }
+-              fib6_walker_unlink(w);
+-              cb->args[4] = 0;
+       }
+-end:
++
+       return res;
+ }
+--- kernel-maemo-2.6.28.test.orig/net/ipv6/ip6_flowlabel.c
++++ kernel-maemo-2.6.28.test/net/ipv6/ip6_flowlabel.c
+@@ -323,17 +323,21 @@
+ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
+         int optlen, int *err_p)
+ {
+-      struct ip6_flowlabel *fl;
++      struct ip6_flowlabel *fl = NULL;
+       int olen;
+       int addr_type;
+       int err;
++      olen = optlen - CMSG_ALIGN(sizeof(*freq));
++      err = -EINVAL;
++      if (olen > 64 * 1024)
++              goto done;
++
+       err = -ENOMEM;
+       fl = kzalloc(sizeof(*fl), GFP_KERNEL);
+       if (fl == NULL)
+               goto done;
+-      olen = optlen - CMSG_ALIGN(sizeof(*freq));
+       if (olen > 0) {
+               struct msghdr msg;
+               struct flowi flowi;
+--- kernel-maemo-2.6.28.test.orig/net/ipv6/ip6_input.c
++++ kernel-maemo-2.6.28.test/net/ipv6/ip6_input.c
+@@ -75,8 +75,7 @@
+       if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
+           !idev || unlikely(idev->cnf.disable_ipv6)) {
+               IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
+-              rcu_read_unlock();
+-              goto out;
++              goto drop;
+       }
+       memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
+@@ -147,7 +146,6 @@
+ drop:
+       rcu_read_unlock();
+       kfree_skb(skb);
+-out:
+       return 0;
+ }
+--- kernel-maemo-2.6.28.test.orig/net/ipv6/ip6_output.c
++++ kernel-maemo-2.6.28.test/net/ipv6/ip6_output.c
+@@ -1104,6 +1104,18 @@
+       return err;
+ }
++static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
++                                             gfp_t gfp)
++{
++      return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
++}
++
++static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
++                                              gfp_t gfp)
++{
++      return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
++}
++
+ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+       int offset, int len, int odd, struct sk_buff *skb),
+       void *from, int length, int transhdrlen,
+@@ -1129,17 +1141,37 @@
+                * setup for corking
+                */
+               if (opt) {
+-                      if (np->cork.opt == NULL) {
+-                              np->cork.opt = kmalloc(opt->tot_len,
+-                                                     sk->sk_allocation);
+-                              if (unlikely(np->cork.opt == NULL))
+-                                      return -ENOBUFS;
+-                      } else if (np->cork.opt->tot_len < opt->tot_len) {
+-                              printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
++                      if (WARN_ON(np->cork.opt))
+                               return -EINVAL;
+-                      }
+-                      memcpy(np->cork.opt, opt, opt->tot_len);
+-                      inet->cork.flags |= IPCORK_OPT;
++
++                      np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
++                      if (unlikely(np->cork.opt == NULL))
++                              return -ENOBUFS;
++
++                      np->cork.opt->tot_len = opt->tot_len;
++                      np->cork.opt->opt_flen = opt->opt_flen;
++                      np->cork.opt->opt_nflen = opt->opt_nflen;
++
++                      np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
++                                                          sk->sk_allocation);
++                      if (opt->dst0opt && !np->cork.opt->dst0opt)
++                              return -ENOBUFS;
++
++                      np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
++                                                          sk->sk_allocation);
++                      if (opt->dst1opt && !np->cork.opt->dst1opt)
++                              return -ENOBUFS;
++
++                      np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
++                                                         sk->sk_allocation);
++                      if (opt->hopopt && !np->cork.opt->hopopt)
++                              return -ENOBUFS;
++
++                      np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
++                                                          sk->sk_allocation);
++                      if (opt->srcrt && !np->cork.opt->srcrt)
++                              return -ENOBUFS;
++
+                       /* need source address above miyazawa*/
+               }
+               dst_hold(&rt->u.dst);
+@@ -1166,8 +1198,7 @@
+       } else {
+               rt = (struct rt6_info *)inet->cork.dst;
+               fl = &inet->cork.fl;
+-              if (inet->cork.flags & IPCORK_OPT)
+-                      opt = np->cork.opt;
++              opt = np->cork.opt;
+               transhdrlen = 0;
+               exthdrlen = 0;
+               mtu = inet->cork.fragsize;
+@@ -1406,9 +1437,15 @@
+ static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
+ {
+-      inet->cork.flags &= ~IPCORK_OPT;
+-      kfree(np->cork.opt);
+-      np->cork.opt = NULL;
++      if (np->cork.opt) {
++              kfree(np->cork.opt->dst0opt);
++              kfree(np->cork.opt->dst1opt);
++              kfree(np->cork.opt->hopopt);
++              kfree(np->cork.opt->srcrt);
++              kfree(np->cork.opt);
++              np->cork.opt = NULL;
++      }
++
+       if (inet->cork.dst) {
+               dst_release(inet->cork.dst);
+               inet->cork.dst = NULL;
+--- kernel-maemo-2.6.28.test.orig/net/ipv6/netfilter/ip6_tables.c
++++ kernel-maemo-2.6.28.test/net/ipv6/netfilter/ip6_tables.c
+@@ -525,7 +525,9 @@
+                           && unconditional(&e->ipv6)) || visited) {
+                               unsigned int oldpos, size;
+-                              if (t->verdict < -NF_MAX_VERDICT - 1) {
++                              if ((strcmp(t->target.u.user.name,
++                                          IP6T_STANDARD_TARGET) == 0) &&
++                                  t->verdict < -NF_MAX_VERDICT - 1) {
+                                       duprintf("mark_source_chains: bad "
+                                               "negative verdict (%i)\n",
+                                                               t->verdict);
+--- kernel-maemo-2.6.28.test.orig/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
++++ kernel-maemo-2.6.28.test/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+@@ -26,7 +26,7 @@
+ #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
+ #include <net/netfilter/nf_log.h>
+-static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
++static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
+ static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
+                               unsigned int dataoff,
+@@ -49,8 +49,8 @@
+ static const u_int8_t invmap[] = {
+       [ICMPV6_ECHO_REQUEST - 128]     = ICMPV6_ECHO_REPLY + 1,
+       [ICMPV6_ECHO_REPLY - 128]       = ICMPV6_ECHO_REQUEST + 1,
+-      [ICMPV6_NI_QUERY - 128]         = ICMPV6_NI_QUERY + 1,
+-      [ICMPV6_NI_REPLY - 128]         = ICMPV6_NI_REPLY +1
++      [ICMPV6_NI_QUERY - 128]         = ICMPV6_NI_REPLY + 1,
++      [ICMPV6_NI_REPLY - 128]         = ICMPV6_NI_QUERY +1
+ };
+ static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
+--- kernel-maemo-2.6.28.test.orig/net/mac80211/tx.c
++++ kernel-maemo-2.6.28.test/net/mac80211/tx.c
+@@ -1302,8 +1302,10 @@
+               if (is_multicast_ether_addr(hdr->addr3))
+                       memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
+               else
+-                      if (mesh_nexthop_lookup(skb, osdata))
+-                              return  0;
++                      if (mesh_nexthop_lookup(skb, osdata)) {
++                              dev_put(odev);
++                              return 0;
++                      }
+               if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
+                       IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
+                                                           fwded_frames);
+@@ -1336,6 +1338,8 @@
+                                               list) {
+                               if (!netif_running(sdata->dev))
+                                       continue;
++                              if (sdata->vif.type != NL80211_IFTYPE_AP)
++                                      continue;
+                               if (compare_ether_addr(sdata->dev->dev_addr,
+                                                      hdr->addr2)) {
+                                       dev_hold(sdata->dev);
+--- kernel-maemo-2.6.28.test.orig/net/netfilter/nf_conntrack_netlink.c
++++ kernel-maemo-2.6.28.test/net/netfilter/nf_conntrack_netlink.c
+@@ -825,13 +825,16 @@
+       if (!parse_nat_setup) {
+ #ifdef CONFIG_MODULES
+               rcu_read_unlock();
++              spin_unlock_bh(&nf_conntrack_lock);
+               nfnl_unlock();
+               if (request_module("nf-nat-ipv4") < 0) {
+                       nfnl_lock();
++                      spin_lock_bh(&nf_conntrack_lock);
+                       rcu_read_lock();
+                       return -EOPNOTSUPP;
+               }
+               nfnl_lock();
++              spin_lock_bh(&nf_conntrack_lock);
+               rcu_read_lock();
+               if (nfnetlink_parse_nat_setup_hook)
+                       return -EAGAIN;
+--- kernel-maemo-2.6.28.test.orig/net/netfilter/nf_conntrack_proto_tcp.c
++++ kernel-maemo-2.6.28.test/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -15,6 +15,7 @@
+ #include <linux/skbuff.h>
+ #include <linux/ipv6.h>
+ #include <net/ip6_checksum.h>
++#include <asm/unaligned.h>
+ #include <net/tcp.h>
+@@ -466,7 +467,7 @@
+                               for (i = 0;
+                                    i < (opsize - TCPOLEN_SACK_BASE);
+                                    i += TCPOLEN_SACK_PERBLOCK) {
+-                                      tmp = ntohl(*((__be32 *)(ptr+i)+1));
++                                      tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);
+                                       if (after(tmp, *sack))
+                                               *sack = tmp;
+--- kernel-maemo-2.6.28.test.orig/net/netfilter/x_tables.c
++++ kernel-maemo-2.6.28.test/net/netfilter/x_tables.c
+@@ -273,6 +273,10 @@
+                               have_rev = 1;
+               }
+       }
++
++      if (af != NFPROTO_UNSPEC && !have_rev)
++              return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
++
+       return have_rev;
+ }
+@@ -289,6 +293,10 @@
+                               have_rev = 1;
+               }
+       }
++
++      if (af != NFPROTO_UNSPEC && !have_rev)
++              return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
++
+       return have_rev;
+ }
+--- kernel-maemo-2.6.28.test.orig/net/netfilter/xt_sctp.c
++++ kernel-maemo-2.6.28.test/net/netfilter/xt_sctp.c
+@@ -105,7 +105,7 @@
+       switch (chunk_match_type) {
+       case SCTP_CHUNK_MATCH_ALL:
+-              return SCTP_CHUNKMAP_IS_CLEAR(info->chunkmap);
++              return SCTP_CHUNKMAP_IS_CLEAR(chunkmapcopy);
+       case SCTP_CHUNK_MATCH_ANY:
+               return false;
+       case SCTP_CHUNK_MATCH_ONLY:
+--- kernel-maemo-2.6.28.test.orig/net/netrom/af_netrom.c
++++ kernel-maemo-2.6.28.test/net/netrom/af_netrom.c
+@@ -1082,7 +1082,13 @@
+       SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n");
+-      /* Build a packet */
++      /* Build a packet - the conventional user limit is 236 bytes. We can
++         do ludicrously large NetROM frames but must not overflow */
++      if (len > 65536) {
++              err = -EMSGSIZE;
++              goto out;
++      }
++
+       SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
+       size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
+--- kernel-maemo-2.6.28.test.orig/net/packet/af_packet.c
++++ kernel-maemo-2.6.28.test/net/packet/af_packet.c
+@@ -77,6 +77,7 @@
+ #include <linux/poll.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
++#include <linux/mutex.h>
+ #ifdef CONFIG_INET
+ #include <net/inet_common.h>
+@@ -175,6 +176,7 @@
+ #endif
+       struct packet_type      prot_hook;
+       spinlock_t              bind_lock;
++      struct mutex            pg_vec_lock;
+       unsigned int            running:1,      /* prot_hook is attached*/
+                               auxdata:1,
+                               origdev:1;
+@@ -220,13 +222,13 @@
+       h.raw = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size);
+       switch (po->tp_version) {
+       case TPACKET_V1:
+-              if (status != h.h1->tp_status ? TP_STATUS_USER :
+-                                              TP_STATUS_KERNEL)
++              if (status != (h.h1->tp_status ? TP_STATUS_USER :
++                                              TP_STATUS_KERNEL))
+                       return NULL;
+               break;
+       case TPACKET_V2:
+-              if (status != h.h2->tp_status ? TP_STATUS_USER :
+-                                              TP_STATUS_KERNEL)
++              if (status != (h.h2->tp_status ? TP_STATUS_USER :
++                                              TP_STATUS_KERNEL))
+                       return NULL;
+               break;
+       }
+@@ -1068,6 +1070,7 @@
+        */
+       spin_lock_init(&po->bind_lock);
++      mutex_init(&po->pg_vec_lock);
+       po->prot_hook.func = packet_rcv;
+       if (sock->type == SOCK_PACKET)
+@@ -1863,6 +1866,7 @@
+       synchronize_net();
+       err = -EBUSY;
++      mutex_lock(&po->pg_vec_lock);
+       if (closing || atomic_read(&po->mapped) == 0) {
+               err = 0;
+ #define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
+@@ -1884,6 +1888,7 @@
+               if (atomic_read(&po->mapped))
+                       printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
+       }
++      mutex_unlock(&po->pg_vec_lock);
+       spin_lock(&po->bind_lock);
+       if (was_running && !po->running) {
+@@ -1916,7 +1921,7 @@
+       size = vma->vm_end - vma->vm_start;
+-      lock_sock(sk);
++      mutex_lock(&po->pg_vec_lock);
+       if (po->pg_vec == NULL)
+               goto out;
+       if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
+@@ -1939,7 +1944,7 @@
+       err = 0;
+ out:
+-      release_sock(sk);
++      mutex_unlock(&po->pg_vec_lock);
+       return err;
+ }
+ #endif
+--- kernel-maemo-2.6.28.test.orig/net/rose/af_rose.c
++++ kernel-maemo-2.6.28.test/net/rose/af_rose.c
+@@ -1124,6 +1124,10 @@
+       /* Build a packet */
+       SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
++      /* Sanity check the packet size */
++      if (len > 65535)
++              return -EMSGSIZE;
++
+       size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
+       if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
+--- kernel-maemo-2.6.28.test.orig/net/sched/cls_u32.c
++++ kernel-maemo-2.6.28.test/net/sched/cls_u32.c
+@@ -637,8 +637,9 @@
+                               break;
+               n->next = *ins;
+-              wmb();
++              tcf_tree_lock(tp);
+               *ins = n;
++              tcf_tree_unlock(tp);
+               *arg = (unsigned long)n;
+               return 0;
+--- kernel-maemo-2.6.28.test.orig/net/sched/sch_htb.c
++++ kernel-maemo-2.6.28.test/net/sched/sch_htb.c
+@@ -924,6 +924,7 @@
+               }
+       }
+       sch->qstats.overlimits++;
++      qdisc_watchdog_cancel(&q->watchdog);
+       qdisc_watchdog_schedule(&q->watchdog, next_event);
+ fin:
+       return skb;
+--- kernel-maemo-2.6.28.test.orig/net/sctp/endpointola.c
++++ kernel-maemo-2.6.28.test/net/sctp/endpointola.c
+@@ -111,7 +111,8 @@
+               if (sctp_addip_enable) {
+                       auth_chunks->chunks[0] = SCTP_CID_ASCONF;
+                       auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
+-                      auth_chunks->param_hdr.length += htons(2);
++                      auth_chunks->param_hdr.length =
++                                      htons(sizeof(sctp_paramhdr_t) + 2);
+               }
+       }
+--- kernel-maemo-2.6.28.test.orig/net/sctp/input.c
++++ kernel-maemo-2.6.28.test/net/sctp/input.c
+@@ -249,6 +249,19 @@
+        */
+       sctp_bh_lock_sock(sk);
++      if (sk != rcvr->sk) {
++              /* Our cached sk is different from the rcvr->sk.  This is
++               * because migrate()/accept() may have moved the association
++               * to a new socket and released all the sockets.  So now we
++               * are holding a lock on the old socket while the user may
++               * be doing something with the new socket.  Switch our veiw
++               * of the current sk.
++               */
++              sctp_bh_unlock_sock(sk);
++              sk = rcvr->sk;
++              sctp_bh_lock_sock(sk);
++      }
++
+       if (sock_owned_by_user(sk)) {
+               SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
+               sctp_add_backlog(sk, skb);
+--- kernel-maemo-2.6.28.test.orig/net/sctp/output.c
++++ kernel-maemo-2.6.28.test/net/sctp/output.c
+@@ -324,14 +324,16 @@
+       switch (chunk->chunk_hdr->type) {
+           case SCTP_CID_DATA:
+               retval = sctp_packet_append_data(packet, chunk);
++              if (SCTP_XMIT_OK != retval)
++                      goto finish;
+               /* Disallow SACK bundling after DATA. */
+               packet->has_sack = 1;
+               /* Disallow AUTH bundling after DATA */
+               packet->has_auth = 1;
+               /* Let it be knows that packet has DATA in it */
+               packet->has_data = 1;
+-              if (SCTP_XMIT_OK != retval)
+-                      goto finish;
++              /* timestamp the chunk for rtx purposes */
++              chunk->sent_at = jiffies;
+               break;
+           case SCTP_CID_COOKIE_ECHO:
+               packet->has_cookie_echo = 1;
+@@ -470,7 +472,6 @@
+                       } else
+                               chunk->resent = 1;
+-                      chunk->sent_at = jiffies;
+                       has_data = 1;
+               }
+--- kernel-maemo-2.6.28.test.orig/net/sctp/outqueue.c
++++ kernel-maemo-2.6.28.test/net/sctp/outqueue.c
+@@ -929,7 +929,6 @@
+               }
+               /* Finally, transmit new packets.  */
+-              start_timer = 0;
+               while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
+                       /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
+                        * stream identifier.
+@@ -1028,7 +1027,7 @@
+                       list_add_tail(&chunk->transmitted_list,
+                                     &transport->transmitted);
+-                      sctp_transport_reset_timers(transport, start_timer-1);
++                      sctp_transport_reset_timers(transport, 0);
+                       q->empty = 0;
+--- kernel-maemo-2.6.28.test.orig/net/sctp/sm_statefuns.c
++++ kernel-maemo-2.6.28.test/net/sctp/sm_statefuns.c
+@@ -3691,6 +3691,7 @@
+ {
+       struct sctp_chunk *chunk = arg;
+       struct sctp_fwdtsn_hdr *fwdtsn_hdr;
++      struct sctp_fwdtsn_skip *skip;
+       __u16 len;
+       __u32 tsn;
+@@ -3720,6 +3721,12 @@
+       if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
+               goto discard_noforce;
++      /* Silently discard the chunk if stream-id is not valid */
++      sctp_walk_fwdtsn(skip, chunk) {
++              if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
++                      goto discard_noforce;
++      }
++
+       sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
+       if (len > sizeof(struct sctp_fwdtsn_hdr))
+               sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
+@@ -3751,6 +3758,7 @@
+ {
+       struct sctp_chunk *chunk = arg;
+       struct sctp_fwdtsn_hdr *fwdtsn_hdr;
++      struct sctp_fwdtsn_skip *skip;
+       __u16 len;
+       __u32 tsn;
+@@ -3780,6 +3788,12 @@
+       if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
+               goto gen_shutdown;
++      /* Silently discard the chunk if stream-id is not valid */
++      sctp_walk_fwdtsn(skip, chunk) {
++              if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
++                      goto gen_shutdown;
++      }
++
+       sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
+       if (len > sizeof(struct sctp_fwdtsn_hdr))
+               sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
+--- kernel-maemo-2.6.28.test.orig/net/socket.c
++++ kernel-maemo-2.6.28.test/net/socket.c
+@@ -1215,7 +1215,7 @@
+       return __sock_create(&init_net, family, type, protocol, res, 1);
+ }
+-asmlinkage long sys_socket(int family, int type, int protocol)
++SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
+ {
+       int retval;
+       struct socket *sock;
+@@ -1256,8 +1256,8 @@
+  *    Create a pair of connected sockets.
+  */
+-asmlinkage long sys_socketpair(int family, int type, int protocol,
+-                             int __user *usockvec)
++SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
++              int __user *, usockvec)
+ {
+       struct socket *sock1, *sock2;
+       int fd1, fd2, err;
+@@ -1364,7 +1364,7 @@
+  *    the protocol layer (having also checked the address is ok).
+  */
+-asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
++SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
+ {
+       struct socket *sock;
+       struct sockaddr_storage address;
+@@ -1393,7 +1393,7 @@
+  *    ready for listening.
+  */
+-asmlinkage long sys_listen(int fd, int backlog)
++SYSCALL_DEFINE2(listen, int, fd, int, backlog)
+ {
+       struct socket *sock;
+       int err, fput_needed;
+@@ -1426,8 +1426,8 @@
+  *    clean when we restucture accept also.
+  */
+-asmlinkage long sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
+-                          int __user *upeer_addrlen, int flags)
++SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
++              int __user *, upeer_addrlen, int, flags)
+ {
+       struct socket *sock, *newsock;
+       struct file *newfile;
+@@ -1510,8 +1510,8 @@
+       goto out_put;
+ }
+-asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr,
+-                         int __user *upeer_addrlen)
++SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
++              int __user *, upeer_addrlen)
+ {
+       return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
+ }
+@@ -1528,8 +1528,8 @@
+  *    include the -EINPROGRESS status for such sockets.
+  */
+-asmlinkage long sys_connect(int fd, struct sockaddr __user *uservaddr,
+-                          int addrlen)
++SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
++              int, addrlen)
+ {
+       struct socket *sock;
+       struct sockaddr_storage address;
+@@ -1560,8 +1560,8 @@
+  *    name to user space.
+  */
+-asmlinkage long sys_getsockname(int fd, struct sockaddr __user *usockaddr,
+-                              int __user *usockaddr_len)
++SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
++              int __user *, usockaddr_len)
+ {
+       struct socket *sock;
+       struct sockaddr_storage address;
+@@ -1591,8 +1591,8 @@
+  *    name to user space.
+  */
+-asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr,
+-                              int __user *usockaddr_len)
++SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
++              int __user *, usockaddr_len)
+ {
+       struct socket *sock;
+       struct sockaddr_storage address;
+@@ -1623,9 +1623,9 @@
+  *    the protocol.
+  */
+-asmlinkage long sys_sendto(int fd, void __user *buff, size_t len,
+-                         unsigned flags, struct sockaddr __user *addr,
+-                         int addr_len)
++SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
++              unsigned, flags, struct sockaddr __user *, addr,
++              int, addr_len)
+ {
+       struct socket *sock;
+       struct sockaddr_storage address;
+@@ -1668,7 +1668,8 @@
+  *    Send a datagram down a socket.
+  */
+-asmlinkage long sys_send(int fd, void __user *buff, size_t len, unsigned flags)
++SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
++              unsigned, flags)
+ {
+       return sys_sendto(fd, buff, len, flags, NULL, 0);
+ }
+@@ -1679,9 +1680,9 @@
+  *    sender address from kernel to user space.
+  */
+-asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size,
+-                           unsigned flags, struct sockaddr __user *addr,
+-                           int __user *addr_len)
++SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
++              unsigned, flags, struct sockaddr __user *, addr,
++              int __user *, addr_len)
+ {
+       struct socket *sock;
+       struct iovec iov;
+@@ -1733,8 +1734,8 @@
+  *    to pass the user mode parameter for the protocols to sort out.
+  */
+-asmlinkage long sys_setsockopt(int fd, int level, int optname,
+-                             char __user *optval, int optlen)
++SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
++              char __user *, optval, int, optlen)
+ {
+       int err, fput_needed;
+       struct socket *sock;
+@@ -1767,8 +1768,8 @@
+  *    to pass a user mode parameter for the protocols to sort out.
+  */
+-asmlinkage long sys_getsockopt(int fd, int level, int optname,
+-                             char __user *optval, int __user *optlen)
++SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
++              char __user *, optval, int __user *, optlen)
+ {
+       int err, fput_needed;
+       struct socket *sock;
+@@ -1797,7 +1798,7 @@
+  *    Shutdown a socket.
+  */
+-asmlinkage long sys_shutdown(int fd, int how)
++SYSCALL_DEFINE2(shutdown, int, fd, int, how)
+ {
+       int err, fput_needed;
+       struct socket *sock;
+@@ -1823,7 +1824,7 @@
+  *    BSD sendmsg interface
+  */
+-asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
++SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
+ {
+       struct compat_msghdr __user *msg_compat =
+           (struct compat_msghdr __user *)msg;
+@@ -1929,8 +1930,8 @@
+  *    BSD recvmsg interface
+  */
+-asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg,
+-                          unsigned int flags)
++SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
++              unsigned int, flags)
+ {
+       struct compat_msghdr __user *msg_compat =
+           (struct compat_msghdr __user *)msg;
+@@ -2053,7 +2054,7 @@
+  *  it is set by the callees.
+  */
+-asmlinkage long sys_socketcall(int call, unsigned long __user *args)
++SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
+ {
+       unsigned long a[6];
+       unsigned long a0, a1;
+--- kernel-maemo-2.6.28.test.orig/net/x25/af_x25.c
++++ kernel-maemo-2.6.28.test/net/x25/af_x25.c
+@@ -1037,6 +1037,12 @@
+               sx25.sx25_addr   = x25->dest_addr;
+       }
++      /* Sanity check the packet size */
++      if (len > 65535) {
++              rc = -EMSGSIZE;
++              goto out;
++      }
++
+       SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");
+       /* Build a packet */
+--- kernel-maemo-2.6.28.test.orig/net/xfrm/xfrm_state.c
++++ kernel-maemo-2.6.28.test/net/xfrm/xfrm_state.c
+@@ -1601,7 +1601,7 @@
+       spin_lock_bh(&xfrm_state_lock);
+       list_del(&walk->all);
+-      spin_lock_bh(&xfrm_state_lock);
++      spin_unlock_bh(&xfrm_state_lock);
+ }
+ EXPORT_SYMBOL(xfrm_state_walk_done);
+--- kernel-maemo-2.6.28.test.orig/scripts/kernel-doc
++++ kernel-maemo-2.6.28.test/scripts/kernel-doc
+@@ -1762,6 +1762,40 @@
+     $state = 0;
+ }
++sub syscall_munge() {
++      my $void = 0;
++
++      $prototype =~ s@[\r\n\t]+@ @gos; # strip newlines/CR's/tabs
++##    if ($prototype =~ m/SYSCALL_DEFINE0\s*\(\s*(a-zA-Z0-9_)*\s*\)/) {
++      if ($prototype =~ m/SYSCALL_DEFINE0/) {
++              $void = 1;
++##            $prototype = "long sys_$1(void)";
++      }
++
++      $prototype =~ s/SYSCALL_DEFINE.*\(/long sys_/; # fix return type & func name
++      if ($prototype =~ m/long (sys_.*?),/) {
++              $prototype =~ s/,/\(/;
++      } elsif ($void) {
++              $prototype =~ s/\)/\(void\)/;
++      }
++
++      # now delete all of the odd-number commas in $prototype
++      # so that arg types & arg names don't have a comma between them
++      my $count = 0;
++      my $len = length($prototype);
++      if ($void) {
++              $len = 0;       # skip the for-loop
++      }
++      for (my $ix = 0; $ix < $len; $ix++) {
++              if (substr($prototype, $ix, 1) eq ',') {
++                      $count++;
++                      if ($count % 2 == 1) {
++                              substr($prototype, $ix, 1) = ' ';
++                      }
++              }
++      }
++}
++
+ sub process_state3_function($$) {
+     my $x = shift;
+     my $file = shift;
+@@ -1774,11 +1808,15 @@
+     elsif ($x =~ /([^\{]*)/) {
+       $prototype .= $1;
+     }
++
+     if (($x =~ /\{/) || ($x =~ /\#\s*define/) || ($x =~ /;/)) {
+       $prototype =~ s@/\*.*?\*/@@gos; # strip comments.
+       $prototype =~ s@[\r\n]+@ @gos; # strip newlines/cr's.
+       $prototype =~ s@^\s+@@gos; # strip leading spaces
+-      dump_function($prototype,$file);
++      if ($prototype =~ /SYSCALL_DEFINE/) {
++              syscall_munge();
++      }
++      dump_function($prototype, $file);
+       reset_state();
+     }
+ }
+--- kernel-maemo-2.6.28.test.orig/scripts/mod/file2alias.c
++++ kernel-maemo-2.6.28.test/scripts/mod/file2alias.c
+@@ -210,6 +210,7 @@
+ static int do_hid_entry(const char *filename,
+                            struct hid_device_id *id, char *alias)
+ {
++      id->bus = TO_NATIVE(id->bus);
+       id->vendor = TO_NATIVE(id->vendor);
+       id->product = TO_NATIVE(id->product);
+--- kernel-maemo-2.6.28.test.orig/security/keys/keyctl.c
++++ kernel-maemo-2.6.28.test/security/keys/keyctl.c
+@@ -54,11 +54,11 @@
+  * - returns the new key's serial number
+  * - implements add_key()
+  */
+-asmlinkage long sys_add_key(const char __user *_type,
+-                          const char __user *_description,
+-                          const void __user *_payload,
+-                          size_t plen,
+-                          key_serial_t ringid)
++SYSCALL_DEFINE5(add_key, const char __user *, _type,
++              const char __user *, _description,
++              const void __user *, _payload,
++              size_t, plen,
++              key_serial_t, ringid)
+ {
+       key_ref_t keyring_ref, key_ref;
+       char type[32], *description;
+@@ -146,10 +146,10 @@
+  *   - if the _callout_info string is empty, it will be rendered as "-"
+  * - implements request_key()
+  */
+-asmlinkage long sys_request_key(const char __user *_type,
+-                              const char __user *_description,
+-                              const char __user *_callout_info,
+-                              key_serial_t destringid)
++SYSCALL_DEFINE4(request_key, const char __user *, _type,
++              const char __user *, _description,
++              const char __user *, _callout_info,
++              key_serial_t, destringid)
+ {
+       struct key_type *ktype;
+       struct key *key;
+@@ -270,6 +270,7 @@
+       /* join the session */
+       ret = join_session_keyring(name);
++      kfree(name);
+  error:
+       return ret;
+@@ -1152,8 +1153,8 @@
+ /*
+  * the key control system call
+  */
+-asmlinkage long sys_keyctl(int option, unsigned long arg2, unsigned long arg3,
+-                         unsigned long arg4, unsigned long arg5)
++SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
++              unsigned long, arg4, unsigned long, arg5)
+ {
+       switch (option) {
+       case KEYCTL_GET_KEYRING_ID:
+--- kernel-maemo-2.6.28.test.orig/security/selinux/hooks.c
++++ kernel-maemo-2.6.28.test/security/selinux/hooks.c
+@@ -4550,6 +4550,7 @@
+       if (err)
+               return err;
+       err = avc_has_perm(sk_sid, if_sid, SECCLASS_NETIF, netif_perm, ad);
++      if (err)
+               return err;
+       err = sel_netnode_sid(addrp, family, &node_sid);
+--- kernel-maemo-2.6.28.test.orig/security/selinux/netlabel.c
++++ kernel-maemo-2.6.28.test/security/selinux/netlabel.c
+@@ -386,11 +386,12 @@
+       if (!S_ISSOCK(inode->i_mode) ||
+           ((mask & (MAY_WRITE | MAY_APPEND)) == 0))
+               return 0;
+-
+       sock = SOCKET_I(inode);
+       sk = sock->sk;
++      if (sk == NULL)
++              return 0;
+       sksec = sk->sk_security;
+-      if (sksec->nlbl_state != NLBL_REQUIRE)
++      if (sksec == NULL || sksec->nlbl_state != NLBL_REQUIRE)
+               return 0;
+       local_bh_disable();
+@@ -490,8 +491,10 @@
+               lock_sock(sk);
+               rc = netlbl_sock_getattr(sk, &secattr);
+               release_sock(sk);
+-              if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE)
++              if (rc == 0)
+                       rc = -EACCES;
++              else if (rc == -ENOMSG)
++                      rc = 0;
+               netlbl_secattr_destroy(&secattr);
+       }
+--- kernel-maemo-2.6.28.test.orig/security/smack/smack_lsm.c
++++ kernel-maemo-2.6.28.test/security/smack/smack_lsm.c
+@@ -604,6 +604,8 @@
+           strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) {
+               if (!capable(CAP_MAC_ADMIN))
+                       rc = -EPERM;
++              if (size == 0)
++                      rc = -EINVAL;
+       } else
+               rc = cap_inode_setxattr(dentry, name, value, size, flags);
+@@ -1360,7 +1362,7 @@
+       struct socket *sock;
+       int rc = 0;
+-      if (value == NULL || size > SMK_LABELLEN)
++      if (value == NULL || size > SMK_LABELLEN || size == 0)
+               return -EACCES;
+       sp = smk_import(value, size);
+--- kernel-maemo-2.6.28.test.orig/sound/core/oss/pcm_oss.c
++++ kernel-maemo-2.6.28.test/sound/core/oss/pcm_oss.c
+@@ -2872,7 +2872,7 @@
+                       setup = kmalloc(sizeof(*setup), GFP_KERNEL);
+                       if (! setup) {
+                               buffer->error = -ENOMEM;
+-                              mutex_lock(&pstr->oss.setup_mutex);
++                              mutex_unlock(&pstr->oss.setup_mutex);
+                               return;
+                       }
+                       if (pstr->oss.setup_list == NULL)
+@@ -2886,7 +2886,7 @@
+                       if (! template.task_name) {
+                               kfree(setup);
+                               buffer->error = -ENOMEM;
+-                              mutex_lock(&pstr->oss.setup_mutex);
++                              mutex_unlock(&pstr->oss.setup_mutex);
+                               return;
+                       }
+               }
+--- kernel-maemo-2.6.28.test.orig/sound/core/oss/rate.c
++++ kernel-maemo-2.6.28.test/sound/core/oss/rate.c
+@@ -157,7 +157,7 @@
+               while (dst_frames1 > 0) {
+                       S1 = S2;
+                       if (src_frames1-- > 0) {
+-                              S1 = *src;
++                              S2 = *src;
+                               src += src_step;
+                       }
+                       if (pos & ~R_MASK) {
+--- kernel-maemo-2.6.28.test.orig/sound/core/sgbuf.c
++++ kernel-maemo-2.6.28.test/sound/core/sgbuf.c
+@@ -38,6 +38,10 @@
+       if (! sgbuf)
+               return -EINVAL;
++      if (dmab->area)
++              vunmap(dmab->area);
++      dmab->area = NULL;
++
+       tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
+       tmpb.dev.dev = sgbuf->dev;
+       for (i = 0; i < sgbuf->pages; i++) {
+@@ -48,9 +52,6 @@
+               tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
+               snd_dma_free_pages(&tmpb);
+       }
+-      if (dmab->area)
+-              vunmap(dmab->area);
+-      dmab->area = NULL;
+       kfree(sgbuf->table);
+       kfree(sgbuf->page_table);
+--- kernel-maemo-2.6.28.test.orig/sound/drivers/mtpav.c
++++ kernel-maemo-2.6.28.test/sound/drivers/mtpav.c
+@@ -706,7 +706,6 @@
+       mtp_card->card = card;
+       mtp_card->irq = -1;
+       mtp_card->share_irq = 0;
+-      mtp_card->inmidiport = 0xffffffff;
+       mtp_card->inmidistate = 0;
+       mtp_card->outmidihwport = 0xffffffff;
+       init_timer(&mtp_card->timer);
+@@ -719,6 +718,8 @@
+       if (err < 0)
+               goto __error;
++      mtp_card->inmidiport = mtp_card->num_ports + MTPAV_PIDX_BROADCAST;
++
+       err = snd_mtpav_get_ISA(mtp_card);
+       if (err < 0)
+               goto __error;
+--- kernel-maemo-2.6.28.test.orig/sound/isa/opl3sa2.c
++++ kernel-maemo-2.6.28.test/sound/isa/opl3sa2.c
+@@ -550,21 +550,27 @@
+ #ifdef CONFIG_PM
+ static int snd_opl3sa2_suspend(struct snd_card *card, pm_message_t state)
+ {
+-      struct snd_opl3sa2 *chip = card->private_data;
++      if (card) {
++              struct snd_opl3sa2 *chip = card->private_data;
+-      snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+-      chip->wss->suspend(chip->wss);
+-      /* power down */
+-      snd_opl3sa2_write(chip, OPL3SA2_PM_CTRL, OPL3SA2_PM_D3);
++              snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
++              chip->wss->suspend(chip->wss);
++              /* power down */
++              snd_opl3sa2_write(chip, OPL3SA2_PM_CTRL, OPL3SA2_PM_D3);
++      }
+       return 0;
+ }
+ static int snd_opl3sa2_resume(struct snd_card *card)
+ {
+-      struct snd_opl3sa2 *chip = card->private_data;
++      struct snd_opl3sa2 *chip;
+       int i;
++      if (!card)
++              return 0;
++
++      chip = card->private_data;
+       /* power up */
+       snd_opl3sa2_write(chip, OPL3SA2_PM_CTRL, OPL3SA2_PM_D0);
+--- kernel-maemo-2.6.28.test.orig/sound/pci/aw2/aw2-alsa.c
++++ kernel-maemo-2.6.28.test/sound/pci/aw2/aw2-alsa.c
+@@ -165,7 +165,7 @@
+ MODULE_PARM_DESC(enable, "Enable Audiowerk2 soundcard.");
+ static struct pci_device_id snd_aw2_ids[] = {
+-      {PCI_VENDOR_ID_SAA7146, PCI_DEVICE_ID_SAA7146, PCI_ANY_ID, PCI_ANY_ID,
++      {PCI_VENDOR_ID_SAA7146, PCI_DEVICE_ID_SAA7146, 0, 0,
+        0, 0, 0},
+       {0}
+ };
+--- kernel-maemo-2.6.28.test.orig/sound/pci/hda/hda_intel.c
++++ kernel-maemo-2.6.28.test/sound/pci/hda/hda_intel.c
+@@ -2063,26 +2063,31 @@
+ {
+       const struct snd_pci_quirk *q;
+-      /* Check VIA HD Audio Controller exist */
+-      if (chip->pci->vendor == PCI_VENDOR_ID_VIA &&
+-          chip->pci->device == VIA_HDAC_DEVICE_ID) {
++      switch (fix) {
++      case POS_FIX_LPIB:
++      case POS_FIX_POSBUF:
++              return fix;
++      }
++
++      /* Check VIA/ATI HD Audio Controller exist */
++      switch (chip->driver_type) {
++      case AZX_DRIVER_VIA:
++      case AZX_DRIVER_ATI:
+               chip->via_dmapos_patch = 1;
+               /* Use link position directly, avoid any transfer problem. */
+               return POS_FIX_LPIB;
+       }
+       chip->via_dmapos_patch = 0;
+-      if (fix == POS_FIX_AUTO) {
+-              q = snd_pci_quirk_lookup(chip->pci, position_fix_list);
+-              if (q) {
+-                      printk(KERN_INFO
+-                                  "hda_intel: position_fix set to %d "
+-                                  "for device %04x:%04x\n",
+-                                  q->value, q->subvendor, q->subdevice);
+-                      return q->value;
+-              }
++      q = snd_pci_quirk_lookup(chip->pci, position_fix_list);
++      if (q) {
++              printk(KERN_INFO
++                     "hda_intel: position_fix set to %d "
++                     "for device %04x:%04x\n",
++                     q->value, q->subvendor, q->subdevice);
++              return q->value;
+       }
+-      return fix;
++      return POS_FIX_AUTO;
+ }
+ /*
+@@ -2208,9 +2213,17 @@
+       gcap = azx_readw(chip, GCAP);
+       snd_printdd("chipset global capabilities = 0x%x\n", gcap);
++      /* ATI chips seem buggy about 64bit DMA addresses */
++      if (chip->driver_type == AZX_DRIVER_ATI)
++              gcap &= ~0x01;
++
+       /* allow 64bit DMA address if supported by H/W */
+       if ((gcap & 0x01) && !pci_set_dma_mask(pci, DMA_64BIT_MASK))
+               pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK);
++      else {
++              pci_set_dma_mask(pci, DMA_32BIT_MASK);
++              pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK);
++      }
+       /* read number of streams from GCAP register instead of using
+        * hardcoded value
+--- kernel-maemo-2.6.28.test.orig/sound/pci/hda/patch_analog.c
++++ kernel-maemo-2.6.28.test/sound/pci/hda/patch_analog.c
+@@ -629,6 +629,36 @@
+       HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
+       HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
+       HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
++      HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0, HDA_OUTPUT),
++      HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0, HDA_OUTPUT),
++      HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
++      HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
++      HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
++      HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
++      HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
++      {
++              .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++              .name = "Capture Source",
++              .info = ad198x_mux_enum_info,
++              .get = ad198x_mux_enum_get,
++              .put = ad198x_mux_enum_put,
++      },
++      {
++              .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++              .name = "External Amplifier",
++              .info = ad198x_eapd_info,
++              .get = ad198x_eapd_get,
++              .put = ad198x_eapd_put,
++              .private_value = 0x1b | (1 << 8), /* port-D, inversed */
++      },
++      { } /* end */
++};
++
++static struct snd_kcontrol_new ad1986a_samsung_mixers[] = {
++      HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
++      HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
++      HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
++      HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
+       HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
+       HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
+       HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
+@@ -917,6 +947,7 @@
+       AD1986A_LAPTOP_EAPD,
+       AD1986A_LAPTOP_AUTOMUTE,
+       AD1986A_ULTRA,
++      AD1986A_SAMSUNG,
+       AD1986A_MODELS
+ };
+@@ -927,6 +958,7 @@
+       [AD1986A_LAPTOP_EAPD]   = "laptop-eapd",
+       [AD1986A_LAPTOP_AUTOMUTE] = "laptop-automute",
+       [AD1986A_ULTRA]         = "ultra",
++      [AD1986A_SAMSUNG]       = "samsung",
+ };
+ static struct snd_pci_quirk ad1986a_cfg_tbl[] = {
+@@ -949,9 +981,9 @@
+       SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba", AD1986A_LAPTOP_EAPD),
+       SND_PCI_QUIRK(0x144d, 0xb03c, "Samsung R55", AD1986A_3STACK),
+       SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_LAPTOP),
+-      SND_PCI_QUIRK(0x144d, 0xc023, "Samsung X60", AD1986A_LAPTOP_EAPD),
+-      SND_PCI_QUIRK(0x144d, 0xc024, "Samsung R65", AD1986A_LAPTOP_EAPD),
+-      SND_PCI_QUIRK(0x144d, 0xc026, "Samsung X11", AD1986A_LAPTOP_EAPD),
++      SND_PCI_QUIRK(0x144d, 0xc023, "Samsung X60", AD1986A_SAMSUNG),
++      SND_PCI_QUIRK(0x144d, 0xc024, "Samsung R65", AD1986A_SAMSUNG),
++      SND_PCI_QUIRK(0x144d, 0xc026, "Samsung X11", AD1986A_SAMSUNG),
+       SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_ULTRA),
+       SND_PCI_QUIRK(0x144d, 0xc504, "Samsung Q35", AD1986A_3STACK),
+       SND_PCI_QUIRK(0x17aa, 0x1011, "Lenovo M55", AD1986A_LAPTOP),
+@@ -1033,6 +1065,17 @@
+               break;
+       case AD1986A_LAPTOP_EAPD:
+               spec->mixers[0] = ad1986a_laptop_eapd_mixers;
++              spec->num_init_verbs = 2;
++              spec->init_verbs[1] = ad1986a_eapd_init_verbs;
++              spec->multiout.max_channels = 2;
++              spec->multiout.num_dacs = 1;
++              spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
++              if (!is_jack_available(codec, 0x25))
++                      spec->multiout.dig_out_nid = 0;
++              spec->input_mux = &ad1986a_laptop_eapd_capture_source;
++              break;
++      case AD1986A_SAMSUNG:
++              spec->mixers[0] = ad1986a_samsung_mixers;
+               spec->num_init_verbs = 3;
+               spec->init_verbs[1] = ad1986a_eapd_init_verbs;
+               spec->init_verbs[2] = ad1986a_automic_verbs;
+@@ -1830,8 +1873,8 @@
+ #define AD1988_SPDIF_OUT_HDMI 0x0b
+ #define AD1988_SPDIF_IN               0x07
+-static hda_nid_t ad1989b_slave_dig_outs[2] = {
+-      AD1988_SPDIF_OUT, AD1988_SPDIF_OUT_HDMI
++static hda_nid_t ad1989b_slave_dig_outs[] = {
++      AD1988_SPDIF_OUT, AD1988_SPDIF_OUT_HDMI, 0
+ };
+ static struct hda_input_mux ad1988_6stack_capture_source = {
+@@ -3190,7 +3233,7 @@
+       "Mic Playback Volume",
+       "CD Playback Volume",
+       "Internal Mic Playback Volume",
+-      "Docking Mic Playback Volume"
++      "Docking Mic Playback Volume",
+       "Beep Playback Volume",
+       "IEC958 Playback Volume",
+       NULL
+@@ -3861,6 +3904,7 @@
+ static struct snd_pci_quirk ad1884a_cfg_tbl[] = {
+       SND_PCI_QUIRK(0x103c, 0x3030, "HP", AD1884A_MOBILE),
+       SND_PCI_QUIRK(0x103c, 0x3056, "HP", AD1884A_MOBILE),
++      SND_PCI_QUIRK(0x103c, 0x30e6, "HP 6730b", AD1884A_LAPTOP),
+       SND_PCI_QUIRK(0x103c, 0x30e7, "HP EliteBook 8530p", AD1884A_LAPTOP),
+       SND_PCI_QUIRK(0x103c, 0x3614, "HP 6730s", AD1884A_LAPTOP),
+       SND_PCI_QUIRK(0x17aa, 0x20ac, "Thinkpad X300", AD1884A_THINKPAD),
+@@ -4221,13 +4265,13 @@
+       spec->num_adc_nids = ARRAY_SIZE(ad1882_adc_nids);
+       spec->adc_nids = ad1882_adc_nids;
+       spec->capsrc_nids = ad1882_capsrc_nids;
+-      if (codec->vendor_id == 0x11d1882)
++      if (codec->vendor_id == 0x11d41882)
+               spec->input_mux = &ad1882_capture_source;
+       else
+               spec->input_mux = &ad1882a_capture_source;
+       spec->num_mixers = 2;
+       spec->mixers[0] = ad1882_base_mixers;
+-      if (codec->vendor_id == 0x11d1882)
++      if (codec->vendor_id == 0x11d41882)
+               spec->mixers[1] = ad1882_loopback_mixers;
+       else
+               spec->mixers[1] = ad1882a_loopback_mixers;
+--- kernel-maemo-2.6.28.test.orig/sound/pci/hda/patch_conexant.c
++++ kernel-maemo-2.6.28.test/sound/pci/hda/patch_conexant.c
+@@ -1470,6 +1470,7 @@
+       SND_PCI_QUIRK(0x103c, 0x30a5, "HP DV5200T/DV8000T", CXT5047_LAPTOP_HP),
+       SND_PCI_QUIRK(0x103c, 0x30b2, "HP DV2000T/DV3000T", CXT5047_LAPTOP),
+       SND_PCI_QUIRK(0x103c, 0x30b5, "HP DV2000Z", CXT5047_LAPTOP),
++      SND_PCI_QUIRK(0x103c, 0x30cf, "HP DV6700", CXT5047_LAPTOP),
+       SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba P100", CXT5047_LAPTOP_EAPD),
+       {}
+ };
+--- kernel-maemo-2.6.28.test.orig/sound/pci/hda/patch_realtek.c
++++ kernel-maemo-2.6.28.test/sound/pci/hda/patch_realtek.c
+@@ -967,6 +967,7 @@
+               case 0x10ec0267:
+               case 0x10ec0268:
+               case 0x10ec0269:
++              case 0x10ec0272:
+               case 0x10ec0660:
+               case 0x10ec0662:
+               case 0x10ec0663:
+@@ -995,6 +996,7 @@
+               case 0x10ec0882:
+               case 0x10ec0883:
+               case 0x10ec0885:
++              case 0x10ec0887:
+               case 0x10ec0889:
+                       snd_hda_codec_write(codec, 0x20, 0,
+                                           AC_VERB_SET_COEF_INDEX, 7);
+@@ -6776,10 +6778,12 @@
+               case 0x106b2800: /* AppleTV */
+                       board_config = ALC885_IMAC24;
+                       break;
++              case 0x106b00a0: /* MacBookPro3,1 - Another revision */
+               case 0x106b00a1: /* Macbook (might be wrong - PCI SSID?) */
+               case 0x106b00a4: /* MacbookPro4,1 */
+               case 0x106b2c00: /* Macbook Pro rev3 */
+               case 0x106b3600: /* Macbook 3.1 */
++              case 0x106b3800: /* MacbookPro4,1 - latter revision */
+                       board_config = ALC885_MBP3;
+                       break;
+               default:
+@@ -8462,6 +8466,8 @@
+       SND_PCI_QUIRK(0x1558, 0, "Clevo laptop", ALC883_LAPTOP_EAPD),
+       SND_PCI_QUIRK(0x15d9, 0x8780, "Supermicro PDSBA", ALC883_3ST_6ch),
+       SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_MEDION),
++      SND_PCI_QUIRK(0x1734, 0x1107, "FSC AMILO Xi2550",
++                    ALC883_FUJITSU_PI2515),
+       SND_PCI_QUIRK(0x1734, 0x1108, "Fujitsu AMILO Pi2515", ALC883_FUJITSU_PI2515),
+       SND_PCI_QUIRK(0x17aa, 0x101e, "Lenovo 101e", ALC883_LENOVO_101E_2ch),
+       SND_PCI_QUIRK(0x17aa, 0x2085, "Lenovo NB0763", ALC883_LENOVO_NB0763),
+@@ -10473,6 +10479,7 @@
+       SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FUJITSU),
+       SND_PCI_QUIRK(0x144d, 0xc032, "Samsung Q1 Ultra", ALC262_ULTRA),
+       SND_PCI_QUIRK(0x144d, 0xc039, "Samsung Q1U EL", ALC262_ULTRA),
++      SND_PCI_QUIRK(0x144d, 0xc510, "Samsung Q45", ALC262_HIPPO),
+       SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000 y410", ALC262_LENOVO_3000),
+       SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_BENQ_ED8),
+       SND_PCI_QUIRK(0x17ff, 0x058d, "Benq T31-16", ALC262_BENQ_T31),
+--- kernel-maemo-2.6.28.test.orig/sound/pci/hda/patch_sigmatel.c
++++ kernel-maemo-2.6.28.test/sound/pci/hda/patch_sigmatel.c
+@@ -89,6 +89,7 @@
+       STAC_DELL_M4_2,
+       STAC_DELL_M4_3,
+       STAC_HP_M4,
++      STAC_HP_DV5,
+       STAC_92HD71BXX_MODELS
+ };
+@@ -1702,6 +1703,7 @@
+       [STAC_DELL_M4_2]        = dell_m4_2_pin_configs,
+       [STAC_DELL_M4_3]        = dell_m4_3_pin_configs,
+       [STAC_HP_M4]            = NULL,
++      [STAC_HP_DV5]           = NULL,
+ };
+ static const char *stac92hd71bxx_models[STAC_92HD71BXX_MODELS] = {
+@@ -1710,6 +1712,7 @@
+       [STAC_DELL_M4_2] = "dell-m4-2",
+       [STAC_DELL_M4_3] = "dell-m4-3",
+       [STAC_HP_M4] = "hp-m4",
++      [STAC_HP_DV5] = "hp-dv5",
+ };
+ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
+@@ -1720,6 +1723,10 @@
+                     "HP dv5", STAC_HP_M4),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30f4,
+                     "HP dv7", STAC_HP_M4),
++      SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30fc,
++                    "HP dv7", STAC_HP_M4),
++      SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3603,
++                    "HP dv5", STAC_HP_DV5),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x361a,
+                               "unknown HP", STAC_HP_M4),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
+@@ -2421,6 +2428,8 @@
+       info->name = "STAC92xx Analog";
+       info->stream[SNDRV_PCM_STREAM_PLAYBACK] = stac92xx_pcm_analog_playback;
++      info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid =
++              spec->multiout.dac_nids[0];
+       info->stream[SNDRV_PCM_STREAM_CAPTURE] = stac92xx_pcm_analog_capture;
+       info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adc_nids[0];
+       info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = spec->num_adcs;
+@@ -3978,8 +3987,19 @@
+                       continue;
+               if (presence)
+                       stac92xx_set_pinctl(codec, cfg->hp_pins[i], val);
++#if 0 /* FIXME */
++/* Resetting the pinctl like below may lead to (a sort of) regressions
++ * on some devices since they use the HP pin actually for line/speaker
++ * outs although the default pin config shows a different pin (that is
++ * wrong and useless).
++ *
++ * So, it's basically a problem of default pin configs, likely a BIOS issue.
++ * But, disabling the code below just works around it, and I'm too tired of
++ * bug reports with such devices...
++ */
+               else
+                       stac92xx_reset_pinctl(codec, cfg->hp_pins[i], val);
++#endif /* FIXME */
+       }
+ } 
+@@ -4648,7 +4668,7 @@
+       case STAC_DELL_M4_3:
+               spec->num_dmics = 1;
+               spec->num_smuxes = 0;
+-              spec->num_dmuxes = 0;
++              spec->num_dmuxes = 1;
+               break;
+       default:
+               spec->num_dmics = STAC92HD71BXX_NUM_DMICS;
+--- kernel-maemo-2.6.28.test.orig/sound/pci/mixart/mixart.c
++++ kernel-maemo-2.6.28.test/sound/pci/mixart/mixart.c
+@@ -607,6 +607,7 @@
+       /* set the format to the board */
+       err = mixart_set_format(stream, format);
+       if(err < 0) {
++              mutex_unlock(&mgr->setup_mutex);
+               return err;
+       }
+--- kernel-maemo-2.6.28.test.orig/sound/pci/oxygen/virtuoso.c
++++ kernel-maemo-2.6.28.test/sound/pci/oxygen/virtuoso.c
+@@ -899,6 +899,7 @@
+       .dac_channels = 8,
+       .dac_volume_min = 0x0f,
+       .dac_volume_max = 0xff,
++      .misc_flags = OXYGEN_MISC_MIDI,
+       .function_flags = OXYGEN_FUNCTION_2WIRE,
+       .dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+       .adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+--- kernel-maemo-2.6.28.test.orig/sound/usb/caiaq/caiaq-device.h
++++ kernel-maemo-2.6.28.test/sound/usb/caiaq/caiaq-device.h
+@@ -75,6 +75,7 @@
+       wait_queue_head_t ep1_wait_queue;
+       wait_queue_head_t prepare_wait_queue;
+       int spec_received, audio_parm_answer;
++      int midi_out_active;
+       char vendor_name[CAIAQ_USB_STR_LEN];
+       char product_name[CAIAQ_USB_STR_LEN];
+--- kernel-maemo-2.6.28.test.orig/sound/usb/caiaq/caiaq-midi.c
++++ kernel-maemo-2.6.28.test/sound/usb/caiaq/caiaq-midi.c
+@@ -59,6 +59,11 @@
+ static int snd_usb_caiaq_midi_output_close(struct snd_rawmidi_substream *substream)
+ {
++      struct snd_usb_caiaqdev *dev = substream->rmidi->private_data;
++      if (dev->midi_out_active) {
++              usb_kill_urb(&dev->midi_out_urb);
++              dev->midi_out_active = 0;
++      }
+       return 0;
+ }
+@@ -69,7 +74,8 @@
+       
+       dev->midi_out_buf[0] = EP1_CMD_MIDI_WRITE;
+       dev->midi_out_buf[1] = 0; /* port */
+-      len = snd_rawmidi_transmit_peek(substream, dev->midi_out_buf+3, EP1_BUFSIZE-3);
++      len = snd_rawmidi_transmit(substream, dev->midi_out_buf + 3,
++                                 EP1_BUFSIZE - 3);
+       
+       if (len <= 0)
+               return;
+@@ -79,24 +85,24 @@
+       
+       ret = usb_submit_urb(&dev->midi_out_urb, GFP_ATOMIC);
+       if (ret < 0)
+-              log("snd_usb_caiaq_midi_send(%p): usb_submit_urb() failed, %d\n",
+-                              substream, ret);
++              log("snd_usb_caiaq_midi_send(%p): usb_submit_urb() failed,"
++                  "ret=%d, len=%d\n",
++                  substream, ret, len);
++      else
++              dev->midi_out_active = 1;
+ }
+ static void snd_usb_caiaq_midi_output_trigger(struct snd_rawmidi_substream *substream, int up)
+ {
+       struct snd_usb_caiaqdev *dev = substream->rmidi->private_data;
+       
+-      if (dev->midi_out_substream != NULL)
+-              return;
+-      
+-      if (!up) {
++      if (up) {
++              dev->midi_out_substream = substream;
++              if (!dev->midi_out_active)
++                      snd_usb_caiaq_midi_send(dev, substream);
++      } else {
+               dev->midi_out_substream = NULL;
+-              return;
+       }
+-      
+-      dev->midi_out_substream = substream;
+-      snd_usb_caiaq_midi_send(dev, substream);
+ }
+@@ -161,16 +167,14 @@
+ void snd_usb_caiaq_midi_output_done(struct urb* urb)
+ {
+       struct snd_usb_caiaqdev *dev = urb->context;
+-              char *buf = urb->transfer_buffer;
+       
++      dev->midi_out_active = 0;
+       if (urb->status != 0)
+               return;
+       if (!dev->midi_out_substream)
+               return;
+-      snd_rawmidi_transmit_ack(dev->midi_out_substream, buf[2]);
+-      dev->midi_out_substream = NULL;
+       snd_usb_caiaq_midi_send(dev, dev->midi_out_substream);
+ }
+--- kernel-maemo-2.6.28.test.orig/sound/usb/usbaudio.c
++++ kernel-maemo-2.6.28.test/sound/usb/usbaudio.c
+@@ -2524,7 +2524,6 @@
+                * build the rate table and bitmap flags
+                */
+               int r, idx;
+-              unsigned int nonzero_rates = 0;
+               fp->rate_table = kmalloc(sizeof(int) * nr_rates, GFP_KERNEL);
+               if (fp->rate_table == NULL) {
+@@ -2532,24 +2531,27 @@
+                       return -1;
+               }
+-              fp->nr_rates = nr_rates;
+-              fp->rate_min = fp->rate_max = combine_triple(&fmt[8]);
++              fp->nr_rates = 0;
++              fp->rate_min = fp->rate_max = 0;
+               for (r = 0, idx = offset + 1; r < nr_rates; r++, idx += 3) {
+                       unsigned int rate = combine_triple(&fmt[idx]);
++                      if (!rate)
++                              continue;
+                       /* C-Media CM6501 mislabels its 96 kHz altsetting */
+                       if (rate == 48000 && nr_rates == 1 &&
+-                          chip->usb_id == USB_ID(0x0d8c, 0x0201) &&
++                          (chip->usb_id == USB_ID(0x0d8c, 0x0201) ||
++                           chip->usb_id == USB_ID(0x0d8c, 0x0102)) &&
+                           fp->altsetting == 5 && fp->maxpacksize == 392)
+                               rate = 96000;
+-                      fp->rate_table[r] = rate;
+-                      nonzero_rates |= rate;
+-                      if (rate < fp->rate_min)
++                      fp->rate_table[fp->nr_rates] = rate;
++                      if (!fp->rate_min || rate < fp->rate_min)
+                               fp->rate_min = rate;
+-                      else if (rate > fp->rate_max)
++                      if (!fp->rate_max || rate > fp->rate_max)
+                               fp->rate_max = rate;
+                       fp->rates |= snd_pcm_rate_to_rate_bit(rate);
++                      fp->nr_rates++;
+               }
+-              if (!nonzero_rates) {
++              if (!fp->nr_rates) {
+                       hwc_debug("All rates were zero. Skipping format!\n");
+                       return -1;
+               }
+@@ -2966,6 +2968,7 @@
+               return -EINVAL;
+       }
+       alts = &iface->altsetting[fp->altset_idx];
++      fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
+       usb_set_interface(chip->dev, fp->iface, 0);
+       init_usb_pitch(chip->dev, fp->iface, alts, fp);
+       init_usb_sample_rate(chip->dev, fp->iface, alts, fp, fp->rate_max);
+--- kernel-maemo-2.6.28.test.orig/sound/usb/usbmidi.c
++++ kernel-maemo-2.6.28.test/sound/usb/usbmidi.c
+@@ -1628,6 +1628,7 @@
+       }
+       ep_info.out_ep = get_endpoint(hostif, 2)->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
++      ep_info.out_interval = 0;
+       ep_info.out_cables = endpoint->out_cables & 0x5555;
+       err = snd_usbmidi_out_endpoint_create(umidi, &ep_info, &umidi->endpoints[0]);
+       if (err < 0)
+--- kernel-maemo-2.6.28.test.orig/virt/kvm/kvm_main.c
++++ kernel-maemo-2.6.28.test/virt/kvm/kvm_main.c
+@@ -553,11 +553,19 @@
+       return young;
+ }
++static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
++                                   struct mm_struct *mm)
++{
++      struct kvm *kvm = mmu_notifier_to_kvm(mn);
++      kvm_arch_flush_shadow(kvm);
++}
++
+ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
+       .invalidate_page        = kvm_mmu_notifier_invalidate_page,
+       .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
+       .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
+       .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
++      .release                = kvm_mmu_notifier_release,
+ };
+ #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
+@@ -821,7 +829,10 @@
+               goto out_free;
+       }
+-      kvm_free_physmem_slot(&old, &new);
++      kvm_free_physmem_slot(&old, npages ? &new : NULL);
++      /* Slot deletion case: we have to update the current slot */
++      if (!npages)
++              *memslot = old;
+ #ifdef CONFIG_DMAR
+       /* map the pages in iommu page table */
+       r = kvm_iommu_map_pages(kvm, base_gfn, npages);
+@@ -918,7 +929,7 @@
+ }
+ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
+-static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
++struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
+ {
+       int i;
+@@ -931,11 +942,12 @@
+       }
+       return NULL;
+ }
++EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
+ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+ {
+       gfn = unalias_gfn(kvm, gfn);
+-      return __gfn_to_memslot(kvm, gfn);
++      return gfn_to_memslot_unaliased(kvm, gfn);
+ }
+ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
+@@ -959,7 +971,7 @@
+       struct kvm_memory_slot *slot;
+       gfn = unalias_gfn(kvm, gfn);
+-      slot = __gfn_to_memslot(kvm, gfn);
++      slot = gfn_to_memslot_unaliased(kvm, gfn);
+       if (!slot)
+               return bad_hva();
+       return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
+@@ -1210,7 +1222,7 @@
+       struct kvm_memory_slot *memslot;
+       gfn = unalias_gfn(kvm, gfn);
+-      memslot = __gfn_to_memslot(kvm, gfn);
++      memslot = gfn_to_memslot_unaliased(kvm, gfn);
+       if (memslot && memslot->dirty_bitmap) {
+               unsigned long rel_gfn = gfn - memslot->base_gfn;
+@@ -1295,7 +1307,7 @@
+       return 0;
+ }
+-static const struct file_operations kvm_vcpu_fops = {
++static struct file_operations kvm_vcpu_fops = {
+       .release        = kvm_vcpu_release,
+       .unlocked_ioctl = kvm_vcpu_ioctl,
+       .compat_ioctl   = kvm_vcpu_ioctl,
+@@ -1689,7 +1701,7 @@
+       return 0;
+ }
+-static const struct file_operations kvm_vm_fops = {
++static struct file_operations kvm_vm_fops = {
+       .release        = kvm_vm_release,
+       .unlocked_ioctl = kvm_vm_ioctl,
+       .compat_ioctl   = kvm_vm_ioctl,
+@@ -1711,6 +1723,17 @@
+       return fd;
+ }
++static long kvm_dev_ioctl_check_extension_generic(long arg)
++{
++      switch (arg) {
++      case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
++              return 1;
++      default:
++              break;
++      }
++      return kvm_dev_ioctl_check_extension(arg);
++}
++
+ static long kvm_dev_ioctl(struct file *filp,
+                         unsigned int ioctl, unsigned long arg)
+ {
+@@ -1730,7 +1753,7 @@
+               r = kvm_dev_ioctl_create_vm();
+               break;
+       case KVM_CHECK_EXTENSION:
+-              r = kvm_dev_ioctl_check_extension(arg);
++              r = kvm_dev_ioctl_check_extension_generic(arg);
+               break;
+       case KVM_GET_VCPU_MMAP_SIZE:
+               r = -EINVAL;
+@@ -2053,6 +2076,8 @@
+       }
+       kvm_chardev_ops.owner = module;
++      kvm_vm_fops.owner = module;
++      kvm_vcpu_fops.owner = module;
+       r = misc_register(&kvm_dev);
+       if (r) {
+--- kernel-maemo-2.6.28.test.orig/virt/kvm/kvm_trace.c
++++ kernel-maemo-2.6.28.test/virt/kvm/kvm_trace.c
+@@ -252,6 +252,7 @@
+                       struct kvm_trace_probe *p = &kvm_trace_probes[i];
+                       marker_probe_unregister(p->name, p->probe_func, p);
+               }
++              marker_synchronize_unregister();
+               relay_close(kt->rchan);
+               debugfs_remove(kt->lost_file);
diff --git a/kernel-maemo-2.6.28/debian/patches/block2mtd-yoush.diff b/kernel-maemo-2.6.28/debian/patches/block2mtd-yoush.diff
new file mode 100644 (file)
index 0000000..ae472d7
--- /dev/null
@@ -0,0 +1,12 @@
+--- kernel-maemo-2.6.28.orig/drivers/mtd/devices/block2mtd.c
++++ kernel-maemo-2.6.28/drivers/mtd/devices/block2mtd.c
+@@ -285,7 +285,8 @@
+       dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+       dev->mtd.erasesize = erase_size;
+-      dev->mtd.writesize = 1;
++      dev->mtd.writesize = 2048;
++      dev->mtd.subpage_sft = 2;
+       dev->mtd.type = MTD_RAM;
+       dev->mtd.flags = MTD_CAP_RAM;
+       dev->mtd.erase = block2mtd_erase;
diff --git a/kernel-maemo-2.6.28/debian/patches/gentoo-fsfixes.diff b/kernel-maemo-2.6.28/debian/patches/gentoo-fsfixes.diff
new file mode 100644 (file)
index 0000000..07bc7d1
--- /dev/null
@@ -0,0 +1,388 @@
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/ext4.h
++++ kernel-maemo-2.6.28.test/fs/ext4/ext4.h
+@@ -255,6 +255,7 @@
+ #define EXT4_STATE_NEW                        0x00000002 /* inode is newly created */
+ #define EXT4_STATE_XATTR              0x00000004 /* has in-inode xattrs */
+ #define EXT4_STATE_NO_EXPAND          0x00000008 /* No space for expansion */
++#define EXT4_STATE_DA_ALLOC_CLOSE     0x00000010 /* Alloc DA blks on close */
+ /* Used to pass group descriptor data when online resize is done */
+ struct ext4_new_group_input {
+@@ -302,7 +303,9 @@
+ #define EXT4_IOC_GROUP_EXTEND         _IOW('f', 7, unsigned long)
+ #define EXT4_IOC_GROUP_ADD            _IOW('f', 8, struct ext4_new_group_input)
+ #define EXT4_IOC_MIGRATE              _IO('f', 9)
++ /* note ioctl 10 reserved for an early version of the FIEMAP ioctl */
+  /* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */
++#define EXT4_IOC_ALLOC_DA_BLKS                _IO('f', 12)
+ /*
+  * ioctl commands in 32 bit emulation
+@@ -1092,6 +1095,7 @@
+ extern void ext4_truncate(struct inode *);
+ extern void ext4_set_inode_flags(struct inode *);
+ extern void ext4_get_inode_flags(struct ext4_inode_info *);
++extern int ext4_alloc_da_blocks(struct inode *inode);
+ extern void ext4_set_aops(struct inode *inode);
+ extern int ext4_writepage_trans_blocks(struct inode *);
+ extern int ext4_meta_trans_blocks(struct inode *, int nrblocks, int idxblocks);
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/file.c
++++ kernel-maemo-2.6.28.test/fs/ext4/file.c
+@@ -33,9 +33,14 @@
+  */
+ static int ext4_release_file(struct inode *inode, struct file *filp)
+ {
++      if (EXT4_I(inode)->i_state & EXT4_STATE_DA_ALLOC_CLOSE) {
++              ext4_alloc_da_blocks(inode);
++              EXT4_I(inode)->i_state &= ~EXT4_STATE_DA_ALLOC_CLOSE;
++      }
+       /* if we are the last writer on the inode, drop the block reservation */
+       if ((filp->f_mode & FMODE_WRITE) &&
+-                      (atomic_read(&inode->i_writecount) == 1))
++                      (atomic_read(&inode->i_writecount) == 1) &&
++                      !EXT4_I(inode)->i_reserved_data_blocks)
+       {
+               down_write(&EXT4_I(inode)->i_data_sem);
+               ext4_discard_preallocations(inode);
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/inode.c
++++ kernel-maemo-2.6.28.test/fs/ext4/inode.c
+@@ -46,10 +46,8 @@
+ static inline int ext4_begin_ordered_truncate(struct inode *inode,
+                                             loff_t new_size)
+ {
+-      return jbd2_journal_begin_ordered_truncate(
+-                                      EXT4_SB(inode->i_sb)->s_journal,
+-                                      &EXT4_I(inode)->jinode,
+-                                      new_size);
++      return jbd2_journal_begin_ordered_truncate(&EXT4_I(inode)->jinode,
++                                                 new_size);
+ }
+ static void ext4_invalidatepage(struct page *page, unsigned long offset);
+@@ -1021,6 +1019,14 @@
+       EXT4_I(inode)->i_reserved_data_blocks -= used;
+       spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
++
++      /*
++       * If we have done all the pending block allocations and if we
++       * don't have any writer on the inode, we can discard the
++       * inode's preallocations.
++       */
++      if (!total && (atomic_read(&inode->i_writecount) == 0))
++              ext4_discard_preallocations(inode);
+ }
+ /*
+@@ -2748,6 +2754,48 @@
+       return;
+ }
++/*
++ * Force all delayed allocation blocks to be allocated for a given inode.
++ */
++int ext4_alloc_da_blocks(struct inode *inode)
++{
++      if (!EXT4_I(inode)->i_reserved_data_blocks &&
++          !EXT4_I(inode)->i_reserved_meta_blocks)
++              return 0;
++
++      /*
++       * We do something simple for now.  The filemap_flush() will
++       * also start triggering a write of the data blocks, which is
++       * not strictly speaking necessary (and for users of
++       * laptop_mode, not even desirable).  However, to do otherwise
++       * would require replicating code paths in:
++       * 
++       * ext4_da_writepages() ->
++       *    write_cache_pages() ---> (via passed in callback function)
++       *        __mpage_da_writepage() -->
++       *           mpage_add_bh_to_extent()
++       *           mpage_da_map_blocks()
++       *
++       * The problem is that write_cache_pages(), located in
++       * mm/page-writeback.c, marks pages clean in preparation for
++       * doing I/O, which is not desirable if we're not planning on
++       * doing I/O at all.
++       *
++       * We could call write_cache_pages(), and then redirty all of
++       * the pages by calling redirty_page_for_writeback() but that
++       * would be ugly in the extreme.  So instead we would need to
++       * replicate parts of the code in the above functions,
++       * simplifying them because we wouldn't actually intend to
++       * write out the pages, but rather only collect contiguous
++       * logical block extents, call the multi-block allocator, and
++       * then update the buffer heads with the block allocations.
++       * 
++       * For now, though, we'll cheat by calling filemap_flush(),
++       * which will map the blocks, and start the I/O, but not
++       * actually wait for the I/O to complete.
++       */
++      return filemap_flush(inode->i_mapping);
++}
+ /*
+  * bmap() is special.  It gets used by applications such as lilo and by
+@@ -3757,6 +3805,9 @@
+       if (!ext4_can_truncate(inode))
+               return;
++      if (inode->i_size == 0)
++              ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
++
+       if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
+               ext4_ext_truncate(inode);
+               return;
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/ioctl.c
++++ kernel-maemo-2.6.28.test/fs/ext4/ioctl.c
+@@ -263,6 +263,20 @@
+               return err;
+       }
++      case EXT4_IOC_ALLOC_DA_BLKS:
++      {
++              int err;
++              if (!is_owner_or_cap(inode))
++                      return -EACCES;
++
++              err = mnt_want_write(filp->f_path.mnt);
++              if (err)
++                      return err;
++              err = ext4_alloc_da_blocks(inode);
++              mnt_drop_write(filp->f_path.mnt);
++              return err;
++      }
++
+       default:
+               return -ENOTTY;
+       }
+--- kernel-maemo-2.6.28.test.orig/fs/ext4/namei.c
++++ kernel-maemo-2.6.28.test/fs/ext4/namei.c
+@@ -2298,7 +2298,7 @@
+       struct inode *old_inode, *new_inode;
+       struct buffer_head *old_bh, *new_bh, *dir_bh;
+       struct ext4_dir_entry_2 *old_de, *new_de;
+-      int retval;
++      int retval, force_da_alloc = 0;
+       old_bh = new_bh = dir_bh = NULL;
+@@ -2436,6 +2436,7 @@
+               ext4_mark_inode_dirty(handle, new_inode);
+               if (!new_inode->i_nlink)
+                       ext4_orphan_add(handle, new_inode);
++              force_da_alloc = 1;
+       }
+       retval = 0;
+@@ -2444,6 +2445,8 @@
+       brelse(old_bh);
+       brelse(new_bh);
+       ext4_journal_stop(handle);
++      if (retval == 0 && force_da_alloc)
++              ext4_alloc_da_blocks(old_inode);
+       return retval;
+ }
+--- kernel-maemo-2.6.28.test.orig/fs/jbd/journal.c
++++ kernel-maemo-2.6.28.test/fs/jbd/journal.c
+@@ -427,7 +427,7 @@
+ }
+ /*
+- * Called under j_state_lock.  Returns true if a transaction was started.
++ * Called under j_state_lock.  Returns true if a transaction commit was started.
+  */
+ int __log_start_commit(journal_t *journal, tid_t target)
+ {
+@@ -495,7 +495,8 @@
+ /*
+  * Start a commit of the current running transaction (if any).  Returns true
+- * if a transaction was started, and fills its tid in at *ptid
++ * if a transaction is going to be committed (or is currently already
++ * committing), and fills its tid in at *ptid
+  */
+ int journal_start_commit(journal_t *journal, tid_t *ptid)
+ {
+@@ -505,15 +506,19 @@
+       if (journal->j_running_transaction) {
+               tid_t tid = journal->j_running_transaction->t_tid;
+-              ret = __log_start_commit(journal, tid);
+-              if (ret && ptid)
++              __log_start_commit(journal, tid);
++              /* There's a running transaction and we've just made sure
++               * it's commit has been scheduled. */
++               * its commit has been scheduled. */
+                       *ptid = tid;
+-      } else if (journal->j_committing_transaction && ptid) {
++              ret = 1;
++      } else if (journal->j_committing_transaction) {
+               /*
+                * If ext3_write_super() recently started a commit, then we
+                * have to wait for completion of that transaction
+                */
+-              *ptid = journal->j_committing_transaction->t_tid;
++              if (ptid)
++                      *ptid = journal->j_committing_transaction->t_tid;
+               ret = 1;
+       }
+       spin_unlock(&journal->j_state_lock);
+--- kernel-maemo-2.6.28.test.orig/fs/jbd2/checkpoint.c
++++ kernel-maemo-2.6.28.test/fs/jbd2/checkpoint.c
+@@ -686,6 +686,7 @@
+          safely remove this transaction from the log */
+       __jbd2_journal_drop_transaction(journal, transaction);
++      kfree(transaction);
+       /* Just in case anybody was waiting for more transactions to be
+            checkpointed... */
+@@ -760,5 +761,4 @@
+       J_ASSERT(journal->j_running_transaction != transaction);
+       jbd_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
+-      kfree(transaction);
+ }
+--- kernel-maemo-2.6.28.test.orig/fs/jbd2/commit.c
++++ kernel-maemo-2.6.28.test/fs/jbd2/commit.c
+@@ -361,7 +361,7 @@
+       int space_left = 0;
+       int first_tag = 0;
+       int tag_flag;
+-      int i;
++      int i, to_free = 0;
+       int tag_bytes = journal_tag_bytes(journal);
+       struct buffer_head *cbh = NULL; /* For transactional checksums */
+       __u32 crc32_sum = ~0;
+@@ -997,12 +997,10 @@
+       journal->j_committing_transaction = NULL;
+       spin_unlock(&journal->j_state_lock);
+-      if (journal->j_commit_callback)
+-              journal->j_commit_callback(journal, commit_transaction);
+-
+       if (commit_transaction->t_checkpoint_list == NULL &&
+           commit_transaction->t_checkpoint_io_list == NULL) {
+               __jbd2_journal_drop_transaction(journal, commit_transaction);
++              to_free = 1;
+       } else {
+               if (journal->j_checkpoint_transactions == NULL) {
+                       journal->j_checkpoint_transactions = commit_transaction;
+@@ -1021,11 +1019,16 @@
+       }
+       spin_unlock(&journal->j_list_lock);
++      if (journal->j_commit_callback)
++              journal->j_commit_callback(journal, commit_transaction);
++
+       trace_mark(jbd2_end_commit, "dev %s transaction %d head %d",
+-                 journal->j_devname, journal->j_commit_sequence,
++                 journal->j_devname, commit_transaction->t_tid,
+                  journal->j_tail_sequence);
+       jbd_debug(1, "JBD: commit %d complete, head %d\n",
+                 journal->j_commit_sequence, journal->j_tail_sequence);
++      if (to_free)
++              kfree(commit_transaction);
+       wake_up(&journal->j_wait_done_commit);
+ }
+--- kernel-maemo-2.6.28.test.orig/fs/jbd2/transaction.c
++++ kernel-maemo-2.6.28.test/fs/jbd2/transaction.c
+@@ -2050,46 +2050,26 @@
+ }
+ /*
+- * File truncate and transaction commit interact with each other in a
+- * non-trivial way.  If a transaction writing data block A is
+- * committing, we cannot discard the data by truncate until we have
+- * written them.  Otherwise if we crashed after the transaction with
+- * write has committed but before the transaction with truncate has
+- * committed, we could see stale data in block A.  This function is a
+- * helper to solve this problem.  It starts writeout of the truncated
+- * part in case it is in the committing transaction.
+- *
+- * Filesystem code must call this function when inode is journaled in
+- * ordered mode before truncation happens and after the inode has been
+- * placed on orphan list with the new inode size. The second condition
+- * avoids the race that someone writes new data and we start
+- * committing the transaction after this function has been called but
+- * before a transaction for truncate is started (and furthermore it
+- * allows us to optimize the case where the addition to orphan list
+- * happens in the same transaction as write --- we don't have to write
+- * any data in such case).
++ * This function must be called when inode is journaled in ordered mode
++ * before truncation happens. It starts writeout of truncated part in
++ * case it is in the committing transaction so that we preserve ordered
++ * mode consistency guarantees.
+  */
+-int jbd2_journal_begin_ordered_truncate(journal_t *journal,
+-                                      struct jbd2_inode *jinode,
++int jbd2_journal_begin_ordered_truncate(struct jbd2_inode *inode,
+                                       loff_t new_size)
+ {
+-      transaction_t *inode_trans, *commit_trans;
++      journal_t *journal;
++      transaction_t *commit_trans;
+       int ret = 0;
+-      /* This is a quick check to avoid locking if not necessary */
+-      if (!jinode->i_transaction)
++      if (!inode->i_transaction && !inode->i_next_transaction)
+               goto out;
+-      /* Locks are here just to force reading of recent values, it is
+-       * enough that the transaction was not committing before we started
+-       * a transaction adding the inode to orphan list */
++      journal = inode->i_transaction->t_journal;
+       spin_lock(&journal->j_state_lock);
+       commit_trans = journal->j_committing_transaction;
+       spin_unlock(&journal->j_state_lock);
+-      spin_lock(&journal->j_list_lock);
+-      inode_trans = jinode->i_transaction;
+-      spin_unlock(&journal->j_list_lock);
+-      if (inode_trans == commit_trans) {
+-              ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
++      if (inode->i_transaction == commit_trans) {
++              ret = filemap_fdatawrite_range(inode->i_vfs_inode->i_mapping,
+                       new_size, LLONG_MAX);
+               if (ret)
+                       jbd2_journal_abort(journal, ret);
+--- kernel-maemo-2.6.28.test.orig/fs/ocfs2/journal.h
++++ kernel-maemo-2.6.28.test/fs/ocfs2/journal.h
+@@ -445,10 +445,8 @@
+ static inline int ocfs2_begin_ordered_truncate(struct inode *inode,
+                                              loff_t new_size)
+ {
+-      return jbd2_journal_begin_ordered_truncate(
+-                              OCFS2_SB(inode->i_sb)->journal->j_journal,
+-                              &OCFS2_I(inode)->ip_jinode,
+-                              new_size);
++      return jbd2_journal_begin_ordered_truncate(&OCFS2_I(inode)->ip_jinode,
++                                                 new_size);
+ }
+ #endif /* OCFS2_JOURNAL_H */
+--- kernel-maemo-2.6.28.test.orig/include/linux/jbd2.h
++++ kernel-maemo-2.6.28.test/include/linux/jbd2.h
+@@ -1087,8 +1087,7 @@
+ extern int       jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
+ extern int       jbd2_journal_force_commit(journal_t *);
+ extern int       jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode);
+-extern int       jbd2_journal_begin_ordered_truncate(journal_t *journal,
+-                              struct jbd2_inode *inode, loff_t new_size);
++extern int       jbd2_journal_begin_ordered_truncate(struct jbd2_inode *inode, loff_t new_size);
+ extern void      jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
+ extern void      jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_inode *jinode);
+@@ -1148,8 +1147,8 @@
+ int jbd2_log_do_checkpoint(journal_t *journal);
+ void __jbd2_log_wait_for_space(journal_t *journal);
+-extern void   __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
+-extern int    jbd2_cleanup_journal_tail(journal_t *);
++extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
++extern int jbd2_cleanup_journal_tail(journal_t *);
+ /* Debugging code only: */
diff --git a/kernel-maemo-2.6.28/debian/patches/nokia-swapfile.diff b/kernel-maemo-2.6.28/debian/patches/nokia-swapfile.diff
new file mode 100644 (file)
index 0000000..663b06b
--- /dev/null
@@ -0,0 +1,247 @@
+--- kernel-maemo-2.6.28.orig/mm/swapfile.c
++++ kernel-maemo-2.6.28/mm/swapfile.c
+@@ -273,22 +273,41 @@
+ static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
+ {
+       int count = p->swap_map[offset];
++      unsigned old;
+-      if (count < SWAP_MAP_MAX) {
+-              count--;
+-              p->swap_map[offset] = count;
+-              if (!count) {
+-                      if (offset < p->lowest_bit)
+-                              p->lowest_bit = offset;
+-                      if (offset > p->highest_bit)
+-                              p->highest_bit = offset;
+-                      if (p->prio > swap_info[swap_list.next].prio)
+-                              swap_list.next = p - swap_info;
+-                      nr_swap_pages++;
+-                      p->inuse_pages--;
+-              }
+-      }
+-      return count;
++      if (count >= SWAP_MAP_MAX)
++              return count;
++
++      count--;
++      p->swap_map[offset] = count;
++      if (count)
++              return count;
++
++      spin_lock(&p->remap_lock);
++
++      if (offset < p->lowest_bit)
++              p->lowest_bit = offset;
++      if (offset > p->highest_bit)
++              p->highest_bit = offset;
++      if (p->prio > swap_info[swap_list.next].prio)
++              swap_list.next = p - swap_info;
++      nr_swap_pages++;
++      p->inuse_pages--;
++
++      /* Re-map the page number */
++      old = p->swap_remap[offset] & 0x7FFFFFFF;
++      /* Zero means it was not re-mapped */
++      if (!old)
++              goto out;
++      /* Clear the re-mapping */
++      p->swap_remap[offset] &= 0x80000000;
++      /* Mark the re-mapped page as unused */
++      p->swap_remap[old] &= 0x7FFFFFFF;
++      /* Record how many free pages there are */
++      p->gaps_exist += 1;
++out:
++      spin_unlock(&p->remap_lock);
++      return 0;
+ }
+ /*
+@@ -977,14 +996,123 @@
+       spin_unlock(&mmlist_lock);
+ }
++/* Find the largest sequence of free pages */
++int find_gap(struct swap_info_struct *sis)
++{
++      unsigned i, uninitialized_var(start), uninitialized_var(gap_next);
++      unsigned uninitialized_var(gap_end), gap_size = 0;
++      int in_gap = 0;
++
++      spin_unlock(&sis->remap_lock);
++      cond_resched();
++      mutex_lock(&sis->remap_mutex);
++
++      /* Check if a gap was found while we waited for the mutex */
++      spin_lock(&sis->remap_lock);
++      if (sis->gap_next <= sis->gap_end) {
++              mutex_unlock(&sis->remap_mutex);
++              return 0;
++      }
++      if (!sis->gaps_exist) {
++              mutex_unlock(&sis->remap_mutex);
++              return -1;
++      }
++      spin_unlock(&sis->remap_lock);
++
++      /*
++       * There is no current gap, so no new re-mappings can be made without
++       * going through this function (find_gap) which is protected by the
++       * remap_mutex.
++       */
++      for (i = 1; i < sis->max; i++) {
++              if (in_gap) {
++                      if (!(sis->swap_remap[i] & 0x80000000))
++                              continue;
++                      if (i - start > gap_size) {
++                              gap_next = start;
++                              gap_end = i - 1;
++                              gap_size = i - start;
++                      }
++                      in_gap = 0;
++              } else {
++                      if (sis->swap_remap[i] & 0x80000000)
++                              continue;
++                      in_gap = 1;
++                      start = i;
++              }
++              cond_resched();
++      }
++      spin_lock(&sis->remap_lock);
++      if (in_gap && i - start > gap_size) {
++              sis->gap_next = start;
++              sis->gap_end = i - 1;
++      } else {
++              sis->gap_next = gap_next;
++              sis->gap_end = gap_end;
++      }
++      mutex_unlock(&sis->remap_mutex);
++      return 0;
++}
++
+ /*
+  * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
+  * corresponds to page offset `offset'.
+  */
+-sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
++sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset, int write)
+ {
+       struct swap_extent *se = sis->curr_swap_extent;
+       struct swap_extent *start_se = se;
++      unsigned old;
++
++      /*
++       * Instead of using the offset we are given, re-map it to the next
++       * sequential position.
++       */
++      spin_lock(&sis->remap_lock);
++      /* Get the old re-mapping */
++      old = sis->swap_remap[offset] & 0x7FFFFFFF;
++      if (write) {
++              /* See if we have free pages */
++              if (sis->gap_next > sis->gap_end) {
++                      /* The gap is used up. Find another one */
++                      if (!sis->gaps_exist || find_gap(sis) < 0) {
++                              /*
++                               * Out of space, so this page must have a
++                               * re-mapping, so use that.
++                               */
++                              BUG_ON(!old);
++                              sis->gap_next = sis->gap_end = old;
++                      }
++              }
++              /* Zero means it was not re-mapped previously */
++              if (old) {
++                      /* Clear the re-mapping */
++                      sis->swap_remap[offset] &= 0x80000000;
++                      /* Mark the re-mapped page as unused */
++                      sis->swap_remap[old] &= 0x7FFFFFFF;
++              } else {
++                      /* Record how many free pages there are */
++                      sis->gaps_exist -= 1;
++              }
++              /* Create the re-mapping to the next free page */
++              sis->swap_remap[offset] |= sis->gap_next;
++              /* Mark it as used */
++              sis->swap_remap[sis->gap_next] |= 0x80000000;
++              /* Use the re-mapped page number */
++              offset = sis->gap_next;
++              /* Update the free pages gap */
++              sis->gap_next += 1;
++      } else {
++              /*
++               * Always read from the existing re-mapping
++               * if there is one. There may not be because
++               * 'swapin_readahead()' has won a race with
++               * 'add_to_swap()'.
++               */
++              if (old)
++                      offset = old;
++      }
++      spin_unlock(&sis->remap_lock);
+       for ( ; ; ) {
+               struct list_head *lh;
+@@ -1015,7 +1143,8 @@
+               return 0;
+       sis = swap_info + swap_type;
+-      return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
++#error map_swap_page does not support hibernation
++      return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset, 0) : 0;
+ }
+ #endif /* CONFIG_HIBERNATION */
+@@ -1342,6 +1471,7 @@
+       p->flags = 0;
+       spin_unlock(&swap_lock);
+       mutex_unlock(&swapon_mutex);
++      vfree(p->swap_remap);
+       vfree(swap_map);
+       inode = mapping->host;
+       if (S_ISBLK(inode->i_mode)) {
+@@ -1485,6 +1615,7 @@
+       unsigned long maxpages = 1;
+       int swapfilesize;
+       unsigned short *swap_map = NULL;
++      unsigned int *swap_remap = NULL;
+       struct page *page = NULL;
+       struct inode *inode = NULL;
+       int did_down = 0;
+@@ -1654,9 +1785,15 @@
+                       error = -ENOMEM;
+                       goto bad_swap;
+               }
++              swap_remap = vmalloc(maxpages * sizeof(unsigned));
++              if (!swap_remap) {
++                      error = -ENOMEM;
++                      goto bad_swap;
++              }
+               error = 0;
+               memset(swap_map, 0, maxpages * sizeof(short));
++              memset(swap_remap, 0, maxpages * sizeof(unsigned));
+               for (i = 0; i < swap_header->info.nr_badpages; i++) {
+                       int page_nr = swap_header->info.badpages[i];
+                       if (page_nr <= 0 || page_nr >= swap_header->info.last_page)
+@@ -1696,6 +1833,12 @@
+       else
+               p->prio = --least_priority;
+       p->swap_map = swap_map;
++      p->swap_remap = swap_remap;
++      p->gap_next = 1;
++      p->gap_end = p->max - 1;
++      p->gaps_exist = p->max - 1;
++      spin_lock_init(&p->remap_lock);
++      mutex_init(&p->remap_mutex);
+       p->flags = SWP_ACTIVE;
+       nr_swap_pages += nr_good_pages;
+       total_swap_pages += nr_good_pages;
+@@ -1734,6 +1877,7 @@
+       p->swap_file = NULL;
+       p->flags = 0;
+       spin_unlock(&swap_lock);
++      vfree(swap_remap);
+       vfree(swap_map);
+       if (swap_file)
+               filp_close(swap_file, NULL);
index 1efb587..8619da7 100644 (file)
@@ -3,7 +3,10 @@ maemo-build.diff
 unionfs-2.5.3.diff
 dm-loop.diff
 usbip.diff
-nilfs2-2.0.18.diff
+#nilfs2-2.0.18.diff
 minstrel-aziwoqpa.diff
 iphb-matan.diff
 ppp_async_matan.diff
+2.6.28.10.diff
+block2mtd-yoush.diff
+gentoo-fsfixes.diff
index 99788e7..e9cb0ec 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
 # Linux kernel version: 2.6.28maemo-omap1
-# Sun Feb 28 20:48:24 2010
+# Tue Mar  9 09:28:49 2010
 #
 CONFIG_ARM=y
 CONFIG_SYS_SUPPORTS_APM_EMULATION=y
@@ -119,7 +119,7 @@ CONFIG_KMOD=y
 CONFIG_BLOCK=y
 # CONFIG_LBD is not set
 # CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LSF=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_BLK_DEV_INTEGRITY is not set
 
@@ -730,9 +730,9 @@ CONFIG_MTD_PARTITIONS=y
 # User Modules And Translation Layers
 #
 CONFIG_MTD_CHAR=y
-# CONFIG_MTD_BLKDEVS is not set
-# CONFIG_MTD_BLOCK is not set
-# CONFIG_MTD_BLOCK_RO is not set
+CONFIG_MTD_BLKDEVS=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_BLOCK_RO=m
 # CONFIG_FTL is not set
 # CONFIG_NFTL is not set
 # CONFIG_INFTL is not set
@@ -773,7 +773,7 @@ CONFIG_MTD_CFI_I2=y
 # CONFIG_MTD_SLRAM is not set
 # CONFIG_MTD_PHRAM is not set
 # CONFIG_MTD_MTDRAM is not set
-# CONFIG_MTD_BLOCK2MTD is not set
+CONFIG_MTD_BLOCK2MTD=m
 
 #
 # Disk-On-Chip Device Drivers
@@ -781,7 +781,17 @@ CONFIG_MTD_CFI_I2=y
 # CONFIG_MTD_DOC2000 is not set
 # CONFIG_MTD_DOC2001 is not set
 # CONFIG_MTD_DOC2001PLUS is not set
-# CONFIG_MTD_NAND is not set
+CONFIG_MTD_NAND=m
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+# CONFIG_MTD_NAND_OMAP2 is not set
+CONFIG_MTD_NAND_IDS=m
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+CONFIG_MTD_NAND_NANDSIM=m
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ALAUDA is not set
 CONFIG_MTD_ONENAND=y
 # CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
 # CONFIG_MTD_ONENAND_GENERIC is not set
@@ -1943,7 +1953,7 @@ CONFIG_XFS_FS=m
 # CONFIG_XFS_POSIX_ACL is not set
 # CONFIG_XFS_RT is not set
 # CONFIG_XFS_DEBUG is not set
-CONFIG_NILFS2_FS=m
+# CONFIG_NILFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
 CONFIG_DNOTIFY=y
 CONFIG_INOTIFY=y