* Downloading the sources:
- apt-get source kernel-maemo
+ apt-get source kernel-power
* Building the kernel packages:
* Modifying the kernel configuration:
in the kernel top directory do:
- cp debian/rx51maemo_defconfig .config
+ cp debian/rx51power_defconfig .config
make menuconfig
- cp .config debian/rx51maemo_defconfig
+ cp .config debian/rx51power_defconfig
make mrproper
and then rebuild it again
For more details read the quilt documentation.
Thomas Tanner <maemo@tannerlab.com>
-
+kernel-power (2.6.28-maemo25) fremantle; urgency=low
+
+ * control: new bugtracker, fix builddeps, set display name, mention reboot
+ * added ondemand-avoid patches by Matan Ziv-Av
+ * avoid frequency 125 MHz by default, change back 124999->125000
+
+ -- Thomas Tanner <maemo@tannerlab.com> Thu, 15 Apr 2010 00:00:00 +0100
+
kernel-power (2.6.28-maemo24) fremantle; urgency=low
* renamed package to kernel-power due to trademark issues
Section: utils
Priority: optional
Maintainer: Thomas Tanner <maemo@tannerlab.com>
-XSBC-Bugtracker: http://talk.maemo.org/showthread.php?t=43420
-XSBC-Original-Maintainer: Yauheni Kaliuta <yauheni.kaliuta@nokia.com>
+XSBC-Bugtracker: https://garage.maemo.org/tracker/?group_id=1528
Build-Depends: debhelper (>= 4.0.0), quilt, sdk-fiasco-gen, libncurses5-dev
-Build-Depends-Indep: bzip2
Standards-Version: 3.8.0
Package: kernel-power-flasher
kernel-feature-unionfs, kernel-feature-crypto, kernel-feature-dmloop, kernel-feature-utf8, kernel-feature-mousejoy,
kernel-feature-usbip, kernel-feature-ppp, kernel-feature-qos, kernel-feature-block2mtd, kernel-feature-kexec,
kernel-feature-overclock, kernel-feature-joikuspot, kernel-feature-slip, kernel-feature-battery, kernel-feature-pptp
+XB-Maemo-Display-Name: Enhanced Linux kernel for power users
Description: Linux kernel updater for an enhanced Maemo 5 kernel 2.6.28.10
- This package will flash the kernel image upon installation. If you want to revert to the
- stock Nokia kernel, run "apt-get install --reinstall kernel kernel-flasher" as root
+ This package will flash the kernel image upon installation.
+ You must shut down and boot your device again for the kernel to become active.
+ .
+ If you want to revert to the stock Nokia kernel, run
+ "apt-get install --reinstall kernel kernel-flasher" as root
or use the flasher to flash the extracted zImage from the firmware image.
- For discussion and support please visit http://talk.maemo.org/showthread.php?t=43420
+ For discussion of the kernel please visit http://talk.maemo.org/showthread.php?t=43420
.
This custom kernel contains additional modules for IPv6, packet filtering,
QoS, NAT, tunneling, kernel configuration, Wifi mesh networking,
serial support, USB/IP and generic USB device drivers, battery info,
overclocking and kexec support.
.
- Warning: Overclocking may damage your device and is at your own risk!
+ Warning: Overclocking may damage your device, void your warranty and is at your own risk!
Overclocking is disabled by default and needs to be enabled during runtime.
.
Known bugs: Touchscreen calibration (osso-applet-screencalibration) is incompatible
+++ /dev/null
---- kernel-maemo-2.6.28.orig/mm/swapfile.c
-+++ kernel-maemo-2.6.28/mm/swapfile.c
-@@ -273,22 +273,41 @@
- static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
- {
- int count = p->swap_map[offset];
-+ unsigned old;
-
-- if (count < SWAP_MAP_MAX) {
-- count--;
-- p->swap_map[offset] = count;
-- if (!count) {
-- if (offset < p->lowest_bit)
-- p->lowest_bit = offset;
-- if (offset > p->highest_bit)
-- p->highest_bit = offset;
-- if (p->prio > swap_info[swap_list.next].prio)
-- swap_list.next = p - swap_info;
-- nr_swap_pages++;
-- p->inuse_pages--;
-- }
-- }
-- return count;
-+ if (count >= SWAP_MAP_MAX)
-+ return count;
-+
-+ count--;
-+ p->swap_map[offset] = count;
-+ if (count)
-+ return count;
-+
-+ spin_lock(&p->remap_lock);
-+
-+ if (offset < p->lowest_bit)
-+ p->lowest_bit = offset;
-+ if (offset > p->highest_bit)
-+ p->highest_bit = offset;
-+ if (p->prio > swap_info[swap_list.next].prio)
-+ swap_list.next = p - swap_info;
-+ nr_swap_pages++;
-+ p->inuse_pages--;
-+
-+ /* Re-map the page number */
-+ old = p->swap_remap[offset] & 0x7FFFFFFF;
-+ /* Zero means it was not re-mapped */
-+ if (!old)
-+ goto out;
-+ /* Clear the re-mapping */
-+ p->swap_remap[offset] &= 0x80000000;
-+ /* Mark the re-mapped page as unused */
-+ p->swap_remap[old] &= 0x7FFFFFFF;
-+ /* Record how many free pages there are */
-+ p->gaps_exist += 1;
-+out:
-+ spin_unlock(&p->remap_lock);
-+ return 0;
- }
-
- /*
-@@ -977,14 +996,123 @@
- spin_unlock(&mmlist_lock);
- }
-
-+/* Find the largest sequence of free pages */
-+int find_gap(struct swap_info_struct *sis)
-+{
-+ unsigned i, uninitialized_var(start), uninitialized_var(gap_next);
-+ unsigned uninitialized_var(gap_end), gap_size = 0;
-+ int in_gap = 0;
-+
-+ spin_unlock(&sis->remap_lock);
-+ cond_resched();
-+ mutex_lock(&sis->remap_mutex);
-+
-+ /* Check if a gap was found while we waited for the mutex */
-+ spin_lock(&sis->remap_lock);
-+ if (sis->gap_next <= sis->gap_end) {
-+ mutex_unlock(&sis->remap_mutex);
-+ return 0;
-+ }
-+ if (!sis->gaps_exist) {
-+ mutex_unlock(&sis->remap_mutex);
-+ return -1;
-+ }
-+ spin_unlock(&sis->remap_lock);
-+
-+ /*
-+ * There is no current gap, so no new re-mappings can be made without
-+ * going through this function (find_gap) which is protected by the
-+ * remap_mutex.
-+ */
-+ for (i = 1; i < sis->max; i++) {
-+ if (in_gap) {
-+ if (!(sis->swap_remap[i] & 0x80000000))
-+ continue;
-+ if (i - start > gap_size) {
-+ gap_next = start;
-+ gap_end = i - 1;
-+ gap_size = i - start;
-+ }
-+ in_gap = 0;
-+ } else {
-+ if (sis->swap_remap[i] & 0x80000000)
-+ continue;
-+ in_gap = 1;
-+ start = i;
-+ }
-+ cond_resched();
-+ }
-+ spin_lock(&sis->remap_lock);
-+ if (in_gap && i - start > gap_size) {
-+ sis->gap_next = start;
-+ sis->gap_end = i - 1;
-+ } else {
-+ sis->gap_next = gap_next;
-+ sis->gap_end = gap_end;
-+ }
-+ mutex_unlock(&sis->remap_mutex);
-+ return 0;
-+}
-+
- /*
- * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
- * corresponds to page offset `offset'.
- */
--sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
-+sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset, int write)
- {
- struct swap_extent *se = sis->curr_swap_extent;
- struct swap_extent *start_se = se;
-+ unsigned old;
-+
-+ /*
-+ * Instead of using the offset we are given, re-map it to the next
-+ * sequential position.
-+ */
-+ spin_lock(&sis->remap_lock);
-+ /* Get the old re-mapping */
-+ old = sis->swap_remap[offset] & 0x7FFFFFFF;
-+ if (write) {
-+ /* See if we have free pages */
-+ if (sis->gap_next > sis->gap_end) {
-+ /* The gap is used up. Find another one */
-+ if (!sis->gaps_exist || find_gap(sis) < 0) {
-+ /*
-+ * Out of space, so this page must have a
-+ * re-mapping, so use that.
-+ */
-+ BUG_ON(!old);
-+ sis->gap_next = sis->gap_end = old;
-+ }
-+ }
-+ /* Zero means it was not re-mapped previously */
-+ if (old) {
-+ /* Clear the re-mapping */
-+ sis->swap_remap[offset] &= 0x80000000;
-+ /* Mark the re-mapped page as unused */
-+ sis->swap_remap[old] &= 0x7FFFFFFF;
-+ } else {
-+ /* Record how many free pages there are */
-+ sis->gaps_exist -= 1;
-+ }
-+ /* Create the re-mapping to the next free page */
-+ sis->swap_remap[offset] |= sis->gap_next;
-+ /* Mark it as used */
-+ sis->swap_remap[sis->gap_next] |= 0x80000000;
-+ /* Use the re-mapped page number */
-+ offset = sis->gap_next;
-+ /* Update the free pages gap */
-+ sis->gap_next += 1;
-+ } else {
-+ /*
-+ * Always read from the existing re-mapping
-+ * if there is one. There may not be because
-+ * 'swapin_readahead()' has won a race with
-+ * 'add_to_swap()'.
-+ */
-+ if (old)
-+ offset = old;
-+ }
-+ spin_unlock(&sis->remap_lock);
-
- for ( ; ; ) {
- struct list_head *lh;
-@@ -1015,7 +1143,8 @@
- return 0;
-
- sis = swap_info + swap_type;
-- return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
-+#error map_swap_page does not support hibernation
-+ return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset, 0) : 0;
- }
- #endif /* CONFIG_HIBERNATION */
-
-@@ -1342,6 +1471,7 @@
- p->flags = 0;
- spin_unlock(&swap_lock);
- mutex_unlock(&swapon_mutex);
-+ vfree(p->swap_remap);
- vfree(swap_map);
- inode = mapping->host;
- if (S_ISBLK(inode->i_mode)) {
-@@ -1485,6 +1615,7 @@
- unsigned long maxpages = 1;
- int swapfilesize;
- unsigned short *swap_map = NULL;
-+ unsigned int *swap_remap = NULL;
- struct page *page = NULL;
- struct inode *inode = NULL;
- int did_down = 0;
-@@ -1654,9 +1785,15 @@
- error = -ENOMEM;
- goto bad_swap;
- }
-+ swap_remap = vmalloc(maxpages * sizeof(unsigned));
-+ if (!swap_remap) {
-+ error = -ENOMEM;
-+ goto bad_swap;
-+ }
-
- error = 0;
- memset(swap_map, 0, maxpages * sizeof(short));
-+ memset(swap_remap, 0, maxpages * sizeof(unsigned));
- for (i = 0; i < swap_header->info.nr_badpages; i++) {
- int page_nr = swap_header->info.badpages[i];
- if (page_nr <= 0 || page_nr >= swap_header->info.last_page)
-@@ -1696,6 +1833,12 @@
- else
- p->prio = --least_priority;
- p->swap_map = swap_map;
-+ p->swap_remap = swap_remap;
-+ p->gap_next = 1;
-+ p->gap_end = p->max - 1;
-+ p->gaps_exist = p->max - 1;
-+ spin_lock_init(&p->remap_lock);
-+ mutex_init(&p->remap_mutex);
- p->flags = SWP_ACTIVE;
- nr_swap_pages += nr_good_pages;
- total_swap_pages += nr_good_pages;
-@@ -1734,6 +1877,7 @@
- p->swap_file = NULL;
- p->flags = 0;
- spin_unlock(&swap_lock);
-+ vfree(swap_remap);
- vfree(swap_map);
- if (swap_file)
- filp_close(swap_file, NULL);
--- /dev/null
+--- kernel-power-2.6.28.orig/drivers/cpufreq/cpufreq_ondemand.c
++++ kernel-power-2.6.28/drivers/cpufreq/cpufreq_ondemand.c
+@@ -57,6 +57,9 @@
+ #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
+ #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
+
++static int avoid_frequencies_count=0;
++static unsigned int avoid_frequencies_table[16];
++
+ static void do_dbs_timer(struct work_struct *work);
+
+ /* Sampling types */
+@@ -105,6 +108,52 @@
+ .powersave_bias = 0,
+ };
+
++static unsigned int find_min_frequency(struct cpufreq_policy *policy,
++ struct cpufreq_frequency_table *table)
++{
++ int i, f;
++ f=policy->max;
++ i=0;
++ while(table[i].frequency!=CPUFREQ_TABLE_END) {
++ if((table[i].frequency<f) &&
++ (table[i].frequency>=policy->min))
++ f=table[i].frequency;
++ i++;
++ }
++ return f;
++}
++
++static unsigned int find_max_frequency(struct cpufreq_policy *policy,
++ struct cpufreq_frequency_table *table)
++{
++ int i, f;
++ f=policy->min;
++ i=0;
++ while(table[i].frequency!=CPUFREQ_TABLE_END) {
++ if((table[i].frequency>f) &&
++ (table[i].frequency<=policy->max))
++ f=table[i].frequency;
++ i++;
++ }
++ return f;
++}
++
++static unsigned int find_lower_frequency(struct cpufreq_policy *policy,
++ struct cpufreq_frequency_table *table,
++ unsigned int freq)
++{
++ int i, f;
++ f=find_min_frequency(policy, table);
++ i=0;
++ while(table[i].frequency!=CPUFREQ_TABLE_END) {
++ if((table[i].frequency>f) &&
++ (table[i].frequency<=freq))
++ f=table[i].frequency;
++ i++;
++ }
++ return f;
++}
++
+ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
+ cputime64_t *wall)
+ {
+@@ -218,8 +267,32 @@
+ int i;
+ for_each_online_cpu(i) {
+ struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
+- dbs_info->freq_table = cpufreq_frequency_get_table(i);
++ struct cpufreq_frequency_table *table;
++ int l, k;
++ table = cpufreq_frequency_get_table(i);
++ l=0;
++ k=0;
++ while(table[k].frequency != CPUFREQ_TABLE_END) {
++ if(table[k].frequency != CPUFREQ_ENTRY_INVALID) {
++ int t,j;
++ t=1;
++ for(j=0;j<avoid_frequencies_count;j++) if(table[k].frequency==avoid_frequencies_table[j]) t=0;
++ l+=t;
++ }
++ k++;
++ }
++ if(dbs_info->freq_table) kfree(dbs_info->freq_table );
++ dbs_info->freq_table = kzalloc(sizeof(struct cpufreq_frequency_table)*(l+1), GFP_KERNEL);
++ for(l=0,k=0; (table[l].frequency != CPUFREQ_TABLE_END); l++)
++ if (table[l].frequency != CPUFREQ_ENTRY_INVALID) {
++ int t,j;
++ t=1;
++ for(j=0;j<avoid_frequencies_count;j++) if(table[l].frequency==avoid_frequencies_table[j]) t=0;
++ if(t)memcpy(&dbs_info->freq_table[k++], &table[l], sizeof(struct cpufreq_frequency_table));
++ }
++ dbs_info->freq_table[k].frequency = CPUFREQ_TABLE_END;
+ dbs_info->freq_lo = 0;
++
+ }
+ }
+
+@@ -357,6 +430,49 @@
+ define_one_rw(ignore_nice_load);
+ define_one_rw(powersave_bias);
+
++static ssize_t show_avoid_frequencies(struct cpufreq_policy *unused,
++ char *buf)
++{
++ int i;
++ char *b=buf;
++ for(i=0;i<avoid_frequencies_count;i++)
++ b+=sprintf(b, "%d ", avoid_frequencies_table[i]);
++ b+=sprintf(b, "\n");
++ return b-buf;
++}
++
++static ssize_t store_avoid_frequencies(struct cpufreq_policy *unused,
++ const char *buf, size_t n)
++{
++ unsigned int value[16];
++ int i;
++
++ i=sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
++ &value[0], &value[1], &value[2], &value[3],
++ &value[4], &value[5], &value[6], &value[7],
++ &value[8], &value[9], &value[10], &value[11],
++ &value[12], &value[13], &value[14], &value[15]
++ );
++ if(i<0) {
++ printk(KERN_ERR "avoid_frequencies: Invalid value\n");
++ return -EINVAL;
++ }
++
++ avoid_frequencies_count=i;
++
++ for(i=0;i<avoid_frequencies_count;i++) {
++ avoid_frequencies_table[i]=value[i];
++ }
++
++ mutex_lock(&dbs_mutex);
++ ondemand_powersave_bias_init();
++ mutex_unlock(&dbs_mutex);
++
++ return n;
++};
++
++define_one_rw(avoid_frequencies);
++
+ static struct attribute * dbs_attributes[] = {
+ &sampling_rate_max.attr,
+ &sampling_rate_min.attr,
+@@ -364,6 +480,7 @@
+ &up_threshold.attr,
+ &ignore_nice_load.attr,
+ &powersave_bias.attr,
++ &avoid_frequencies.attr,
+ NULL
+ };
+
+@@ -439,10 +556,9 @@
+ if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
+ /* if we are already at full speed then break out early */
+ if (!dbs_tuners_ins.powersave_bias) {
+- if (policy->cur == policy->max)
++ if (policy->cur == find_max_frequency(policy, this_dbs_info->freq_table))
+ return;
+-
+- __cpufreq_driver_target(policy, policy->max,
++ __cpufreq_driver_target(policy, find_max_frequency(policy, this_dbs_info->freq_table),
+ CPUFREQ_RELATION_H);
+ } else {
+ int freq = powersave_bias_target(policy, policy->max,
+@@ -472,7 +588,8 @@
+ dbs_tuners_ins.down_differential);
+
+ if (!dbs_tuners_ins.powersave_bias) {
+- __cpufreq_driver_target(policy, freq_next,
++
++ __cpufreq_driver_target(policy, find_lower_frequency(policy, this_dbs_info->freq_table, freq_next),
+ CPUFREQ_RELATION_L);
+ } else {
+ int freq = powersave_bias_target(policy, freq_next,
+@@ -550,7 +667,7 @@
+ this_dbs_info = &per_cpu(cpu_dbs_info, 0);
+ policy = this_dbs_info->cur_policy;
+
+- __cpufreq_driver_target(policy, policy->max,
++ __cpufreq_driver_target(policy, find_max_frequency(policy, this_dbs_info->freq_table),
+ CPUFREQ_RELATION_L);
+ this_dbs_info->prev_cpu_idle = get_cpu_idle_time(0,
+ &this_dbs_info->prev_cpu_wall);
--- kernel-power-2.6.28.orig/arch/arm/mach-omap2/omap3-opp.h
+++ kernel-power-2.6.28/arch/arm/mach-omap2/omap3-opp.h
-@@ -4,13 +4,23 @@
+@@ -4,6 +4,15 @@
#include <mach/omap-pm.h>
/* MPU speeds */
#define S600M 600000000
#define S550M 550000000
#define S500M 500000000
- #define S250M 250000000
--#define S125M 125000000
-+#define S125M 124999000 /* temporary workaround to ignore telephone app's stubborn request for the lowest available frequency */
+@@ -11,6 +20,7 @@
+ #define S125M 125000000
/* DSP speeds */
+#define S520M 520000000
#ifdef CONFIG_OMAP_PM_SRF
error = sysfs_create_file(power_kobj,
&vdd1_opp_attr.attr);
+--- kernel-power-2.6.28.orig/drivers/cpufreq/cpufreq_ondemand.c
++++ kernel-power-2.6.28/drivers/cpufreq/cpufreq_ondemand.c
+@@ -844,6 +844,10 @@
+ u64 idle_time;
+ int cpu = get_cpu();
+
++ /* N900 hack */
++ avoid_frequencies_table[0] = 125000;
++ avoid_frequencies_count=1;
++
+ idle_time = get_cpu_idle_time_us(cpu, &wall);
+ put_cpu();
+ if (idle_time != -1ULL) {
trig-keyb.diff
twl-scrollock.diff
squashfs.diff
+ondemand-avoid.diff
overclock.diff
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.28.10maemo-omap1
+# Linux kernel version: 2.6.28.10power-omap1
# Wed Apr 14 22:22:49 2010
#
CONFIG_ARM=y