Merge branch 'master' of http://atrey.karlin.mff.cuni.cz/~pali/kernel-power.git
author Pali Rohár <pali.rohar@gmail.com>
Thu, 11 Aug 2011 09:46:44 +0000 (11:46 +0200)
committer Pali Rohár <pali.rohar@gmail.com>
Sat, 13 Aug 2011 08:47:02 +0000 (10:47 +0200)
kernel-power-2.6.28/debian/kernel-power-modules.postinst.in
kernel-power-2.6.28/debian/patches/anti-io-stalling.diff [new file with mode: 0644]
kernel-power-2.6.28/debian/patches/bq24150-sniff.diff [new file with mode: 0644]
kernel-power-2.6.28/debian/patches/minstrel-aziwoqpa.diff [new file with mode: 0644]
kernel-power-2.6.28/debian/patches/minstrel-aziwoqpa_fixed.diff [deleted file]
kernel-power-2.6.28/debian/patches/series
kernel-power-2.6.28/debian/patches/vanilla-2.6.28-anti-io-stalling.diff [deleted file]
kernel-power-2.6.28/debian/usbehci.diff [deleted file]

diff --git a/kernel-power-2.6.28/debian/kernel-power-modules.postinst.in b/kernel-power-2.6.28/debian/kernel-power-modules.postinst.in
index 702b45e..bfc427b 100644
@@ -10,5 +10,8 @@ if grep -q "MODULE_PATH=/lib/modules/current" /sbin/preinit; then
     sed 's%MODULE_PATH=/lib/modules/current%MODULE_PATH=/lib/modules/`uname -r` #fixed by kernel-power%' -i /sbin/preinit
 fi
 
+dpkg-divert --local --divert /lib/modules/2.6.28-omap1/JoikuSpot_Bouncer.ko --rename --add /usr/bin/JoikuSpot_Bouncer.ko || :
+ln -sf /lib/modules/current/JoikuSpot_Bouncer.ko /usr/bin/JoikuSpot_Bouncer.ko || :
+
 #DEBHELPER#
 
diff --git a/kernel-power-2.6.28/debian/patches/anti-io-stalling.diff b/kernel-power-2.6.28/debian/patches/anti-io-stalling.diff
new file mode 100644
index 0000000..1eb82e3
--- /dev/null
@@ -0,0 +1,138 @@
+--- kernel-2.6.28/mm/vmscan.c.orig     2009-05-02 14:54:43.000000000 -0400
++++ kernel-2.6.28/mm/vmscan.c  2010-11-11 12:06:49.955635002 -0500
+@@ -72,6 +72,12 @@ struct scan_control {
+       int order;
++      /*
++       * Intend to reclaim enough contiguous memory rather than just a
++       * sufficient amount of memory, i.e. it's the mode for high-order allocation.
++       */
++      bool lumpy_reclaim_mode;
++
+       /* Which cgroup do we reclaim from */
+       struct mem_cgroup *mem_cgroup;
+@@ -1024,6 +1030,47 @@ int isolate_lru_page(struct page *page)
+ }
+ /*
++ * Returns true if the caller should wait to clean dirty/writeback pages.
++ *
++ * If we are direct reclaiming for contiguous pages and we do not reclaim
++ * everything in the list, try again and wait for writeback IO to complete.
++ * This will stall high-order allocations noticeably. Only do that when we
++ * really need to free the pages under high memory pressure.
++ */
++static inline bool should_reclaim_stall(unsigned long nr_taken,
++                                      unsigned long nr_freed,
++                                      int priority,
++                                      struct scan_control *sc)
++{
++      int lumpy_stall_priority;
++
++      /* kswapd should not stall on sync IO */
++      if (current_is_kswapd())
++              return false;
++
++      /* Only stall on lumpy reclaim */
++      if (!sc->lumpy_reclaim_mode)
++              return false;
++
++      /* If we have reclaimed everything on the isolated list, no stall */
++      if (nr_freed == nr_taken)
++              return false;
++
++      /*
++       * For high-order allocations, there are two stall thresholds.
++       * High-cost allocations stall immediately whereas lower
++       * order allocations such as stacks require the scanning
++       * priority to be much higher before stalling.
++       */
++      if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
++              lumpy_stall_priority = DEF_PRIORITY;
++      else
++              lumpy_stall_priority = DEF_PRIORITY / 3;
++
++      return priority <= lumpy_stall_priority;
++}
++
++/*
+  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
+  * of reclaimed pages
+  */
+@@ -1047,7 +1094,7 @@ static unsigned long shrink_inactive_lis
+               unsigned long nr_freed;
+               unsigned long nr_active;
+               unsigned int count[NR_LRU_LISTS] = { 0, };
+-              int mode = ISOLATE_INACTIVE;
++// use lumpy  int mode = ISOLATE_INACTIVE;
+               /*
+                * If we need a large contiguous chunk of memory, or have
+@@ -1056,13 +1103,11 @@ static unsigned long shrink_inactive_lis
+                *
+                * We use the same threshold as pageout congestion_wait below.
+                */
+-              if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+-                      mode = ISOLATE_BOTH;
+-              else if (sc->order && priority < DEF_PRIORITY - 2)
+-                      mode = ISOLATE_BOTH;
+               nr_taken = sc->isolate_pages(sc->swap_cluster_max,
+-                           &page_list, &nr_scan, sc->order, mode,
++                           &page_list, &nr_scan, sc->order, 
++                              sc->lumpy_reclaim_mode ?
++                                      ISOLATE_BOTH : ISOLATE_INACTIVE,
+                               zone, sc->mem_cgroup, 0, file);
+               nr_active = clear_active_flags(&page_list, count);
+               __count_vm_events(PGDEACTIVATE, nr_active);
+@@ -1088,16 +1133,8 @@ static unsigned long shrink_inactive_lis
+               nr_scanned += nr_scan;
+               nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
+-              /*
+-               * If we are direct reclaiming for contiguous pages and we do
+-               * not reclaim everything in the list, try again and wait
+-               * for IO to complete. This will stall high-order allocations
+-               * but that should be acceptable to the caller
+-               */
+-              if (nr_freed < nr_taken && !current_is_kswapd() &&
+-                                      sc->order > PAGE_ALLOC_COSTLY_ORDER) {
+-                      congestion_wait(WRITE, HZ/10);
+-
++              /* Check if we should synchronously wait for writeback */
++              if (should_reclaim_stall(nr_taken, nr_freed, priority, sc)) {
+                       /*
+                        * The attempt at page out may have made some
+                        * of the pages active, mark them inactive again.
+@@ -1404,6 +1441,20 @@ static void get_scan_ratio(struct zone *
+       percent[1] = 100 - percent[0];
+ }
++static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
++{
++      /*
++      * If we need a large contiguous chunk of memory, or have
++      * trouble getting a small set of contiguous pages, we
++      * will reclaim both active and inactive pages.
++      */
++      if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
++              sc->lumpy_reclaim_mode = 1;
++      else if (sc->order && priority < DEF_PRIORITY - 2)
++              sc->lumpy_reclaim_mode = 1;
++      else
++              sc->lumpy_reclaim_mode = 0;
++}
+ /*
+  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
+@@ -1419,6 +1470,8 @@ static unsigned long shrink_zone(int pri
+       get_scan_ratio(zone, sc, percent);
++      set_lumpy_reclaim_mode(priority, sc);
++
+       for_each_evictable_lru(l) {
+               if (scan_global_lru(sc)) {
+                       int file = is_file_lru(l);
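The patch above replaces an inline stall heuristic with the two-threshold should_reclaim_stall() decision. As a readability aid only, not part of the patch, here is a user-space C restatement of that decision: current_is_kswapd() becomes a plain parameter, and DEF_PRIORITY / PAGE_ALLOC_COSTLY_ORDER are stubbed with their usual values (12 and 3), which is an assumption of this sketch rather than something taken from the 2.6.28 headers.

#include <stdbool.h>
#include <stdio.h>

/* Stub values; in the kernel these come from the mm headers. 12 and 3
 * are their customary definitions, assumed here for the demo. */
#define DEF_PRIORITY            12
#define PAGE_ALLOC_COSTLY_ORDER 3

/* Minimal model of the scan_control fields the decision consults. */
struct scan_control {
    int order;               /* allocation order being reclaimed for */
    bool lumpy_reclaim_mode; /* set once per shrink_zone() pass */
};

/* User-space restatement of the patched logic: kswapd never stalls,
 * non-lumpy reclaim never stalls, a fully reclaimed batch never stalls;
 * otherwise costly orders stall at any scan priority, while cheaper
 * high-order allocations stall only once priority has dropped to
 * DEF_PRIORITY / 3 or below. */
static bool should_reclaim_stall(unsigned long nr_taken,
                                 unsigned long nr_freed,
                                 int priority, bool is_kswapd,
                                 const struct scan_control *sc)
{
    int lumpy_stall_priority;

    if (is_kswapd)
        return false;
    if (!sc->lumpy_reclaim_mode)
        return false;
    if (nr_freed == nr_taken)
        return false;

    lumpy_stall_priority = (sc->order > PAGE_ALLOC_COSTLY_ORDER)
                               ? DEF_PRIORITY
                               : DEF_PRIORITY / 3;
    return priority <= lumpy_stall_priority;
}

int main(void)
{
    struct scan_control sc = { .order = 2, .lumpy_reclaim_mode = true };

    /* Order-2 (cheap) allocation: no stall at priority 12, stall at 4. */
    printf("priority 12: %d\n", should_reclaim_stall(32, 16, 12, false, &sc));
    printf("priority  4: %d\n", should_reclaim_stall(32, 16, 4, false, &sc));
    return 0;
}

Since scan priority counts down from DEF_PRIORITY as memory pressure rises, the effect is that costly (order > 3) allocations may stall on writeback at any point, while smaller high-order allocations such as stacks only stall once reclaim is already struggling.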
diff --git a/kernel-power-2.6.28/debian/patches/bq24150-sniff.diff b/kernel-power-2.6.28/debian/patches/bq24150-sniff.diff
new file mode 100644
index 0000000..78d6c80
--- /dev/null
@@ -0,0 +1,52 @@
+--- kernel-power-2.6.28.orig/drivers/i2c/i2c-core.c
++++ kernel-power-2.6.28/drivers/i2c/i2c-core.c
+@@ -1042,7 +1042,26 @@
+                               (msgs[ret].flags & I2C_M_RECV_LEN) ? "+" : "");
+               }
+ #endif
+-
++              // inserted
++              int i;
++              if (msgs[0].addr == 0x6b) {
++                printk("%s ", dev_name(&adap->dev));
++                for (ret = 0; ret < num; ret++) { // nr of messages in this call
++                  if (!(msgs[ret].flags & I2C_M_RD)) {
++                    printk("(W):");
++                    for (i = 0; i < msgs[ret].len; i++) {
++                      printk(" 0x%02x", msgs[ret].buf[i]);
++                    }
++                  }
++                  else {
++                    printk("(R) %d bytes", msgs[ret].len);
++                  }
++                  printk(", ");
++                }
++                printk("\n");
++              }
++              // end inserted
++              
+               if (in_atomic() || irqs_disabled()) {
+                       ret = mutex_trylock(&adap->bus_lock);
+                       if (!ret)
+@@ -1054,7 +1073,20 @@
+               ret = adap->algo->master_xfer(adap,msgs,num);
+               mutex_unlock(&adap->bus_lock);
+-
++              
++              // inserted
++              int j;
++              for (i = 0; i < num; i++) {
++                if (msgs[i].addr == 0x6b && (msgs[i].flags & I2C_M_RD)) {
++                  printk("i2c_read: ");
++                  for (j = 0; j < msgs[i].len; j++) {
++                    printk(" 0x%02x", msgs[i].buf[j]);
++                  }
++                  printk("\n");
++                }
++              }
++              // end inserted
++              
+               return ret;
+       } else {
+               dev_dbg(&adap->dev, "I2C level transfers not supported\n");
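The two inserted blocks are a quick printk tracer for the bq24150 charger at I2C address 0x6b: write payloads are dumped byte by byte before the transfer, and read buffers are dumped after master_xfer() has filled them. Note the hack reuses ret as a loop counter and declares i and j mid-block, which kernel builds typically warn about (-Wdeclaration-after-statement). A self-contained user-space sketch of the same dump format, with a stand-in struct i2c_msg whose layout is assumed from linux/i2c.h:

#include <stdint.h>
#include <stdio.h>

#define I2C_M_RD 0x0001  /* read flag, as in linux/i2c.h */

/* Stand-in for struct i2c_msg; the real definition lives in linux/i2c.h. */
struct i2c_msg {
    uint16_t addr;   /* 7-bit slave address */
    uint16_t flags;
    uint16_t len;
    uint8_t *buf;
};

/* Same idea as the patch's first inserted block: before the transfer,
 * dump write payloads in hex and note the size of pending reads, but
 * only when the first message targets the charger at 0x6b. */
static void sniff_before(const struct i2c_msg *msgs, int num)
{
    if (num == 0 || msgs[0].addr != 0x6b)
        return;
    for (int m = 0; m < num; m++) {
        if (!(msgs[m].flags & I2C_M_RD)) {
            printf("(W):");
            for (int i = 0; i < msgs[m].len; i++)
                printf(" 0x%02x", msgs[m].buf[i]);
        } else {
            printf("(R) %d bytes", msgs[m].len);
        }
        printf(", ");
    }
    printf("\n");
}

int main(void)
{
    uint8_t wr[] = { 0x00, 0x40 };
    struct i2c_msg msgs[] = {
        { .addr = 0x6b, .flags = 0,        .len = 2, .buf = wr },
        { .addr = 0x6b, .flags = I2C_M_RD, .len = 1, .buf = wr },
    };

    sniff_before(msgs, 2);  /* prints: (W): 0x00 0x40, (R) 1 bytes, */
    return 0;
}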
diff --git a/kernel-power-2.6.28/debian/patches/minstrel-aziwoqpa.diff b/kernel-power-2.6.28/debian/patches/minstrel-aziwoqpa.diff
new file mode 100644
index 0000000..e87eb71
--- /dev/null
@@ -0,0 +1,11 @@
+--- kernel-power-2.6.28.orig/net/mac80211/rc80211_minstrel.c.orig      2010-11-13 13:20:41.000000000 -0500
++++ kernel-power-2.6.28/net/mac80211/rc80211_minstrel.c        2010-11-13 13:46:05.246025157 -0500
+@@ -228,7 +228,7 @@
+       unsigned int sample_ndx;
+       sample_ndx = SAMPLE_TBL(mi, mi->sample_idx, mi->sample_column);
+       mi->sample_idx++;
+-      if ((int) mi->sample_idx > (mi->n_rates - 2)) {
++      if (mi->n_rates == 1 || (int) mi->sample_idx > (mi->n_rates - 2)) {
+               mi->sample_idx = 0;
+               mi->sample_column++;
+               if (mi->sample_column >= SAMPLE_COLUMNS)
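The one-line change guards against peers that advertise only a single rate. If minstrel's rate count is stored as an unsigned integer — an assumption of this sketch, not verified against the 2.6.28 headers — then n_rates - 2 wraps to UINT_MAX when n_rates == 1, the reset condition never fires, and sample_idx grows past the end of the sampling table. A user-space demonstration with hypothetical stand-in fields:

#include <stdio.h>

/* Hypothetical stand-in for minstrel's per-station fields; the unsigned
 * n_rates is an assumption made to exhibit the wraparound. */
struct mi_state {
    unsigned int sample_idx;
    unsigned int n_rates;
};

/* Old condition: for n_rates == 1, n_rates - 2 is computed in unsigned
 * arithmetic and wraps to UINT_MAX, so this never returns true and the
 * sample index is never reset. */
static int old_should_reset(const struct mi_state *mi)
{
    return (int) mi->sample_idx > (mi->n_rates - 2);
}

/* Patched condition: a single-rate peer short-circuits to a reset. */
static int new_should_reset(const struct mi_state *mi)
{
    return mi->n_rates == 1 || (int) mi->sample_idx > (mi->n_rates - 2);
}

int main(void)
{
    struct mi_state mi = { .sample_idx = 0, .n_rates = 1 };

    for (int step = 0; step < 3; step++) {
        mi.sample_idx++;
        printf("idx=%u old_reset=%d new_reset=%d\n",
               mi.sample_idx, old_should_reset(&mi),
               new_should_reset(&mi));
    }
    return 0;  /* old_reset stays 0 forever; new_reset is always 1 */
}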
diff --git a/kernel-power-2.6.28/debian/patches/minstrel-aziwoqpa_fixed.diff b/kernel-power-2.6.28/debian/patches/minstrel-aziwoqpa_fixed.diff
deleted file mode 100644
index e87eb71..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
---- kernel-power-2.6.28.orig/net/mac80211/rc80211_minstrel.c.orig      2010-11-13 13:20:41.000000000 -0500
-+++ kernel-power-2.6.28/net/mac80211/rc80211_minstrel.c        2010-11-13 13:46:05.246025157 -0500
-@@ -228,7 +228,7 @@
-       unsigned int sample_ndx;
-       sample_ndx = SAMPLE_TBL(mi, mi->sample_idx, mi->sample_column);
-       mi->sample_idx++;
--      if ((int) mi->sample_idx > (mi->n_rates - 2)) {
-+      if (mi->n_rates == 1 || (int) mi->sample_idx > (mi->n_rates - 2)) {
-               mi->sample_idx = 0;
-               mi->sample_column++;
-               if (mi->sample_column >= SAMPLE_COLUMNS)
diff --git a/kernel-power-2.6.28/debian/patches/series b/kernel-power-2.6.28/debian/patches/series
index 5a879c0..f4f3d98 100644
@@ -2,17 +2,21 @@
 nokia-20094803.3+0m5.diff
 nokia-20100903+0m5.diff
 nokia-20101501+0m5.diff
+#nokia-20101501+0m5-nosmart.diff
 nokia-20103103+0m5.diff
+#nokia-20103103+0m5_usb.diff
 2.6.28.10.diff
 rx51_defconfig.diff
 unionfs-2.5.3.diff
 dm-loop.diff
 usbip.diff
 nilfs2-2.0.18.diff
+minstrel-aziwoqpa.diff
 iphb-matan.diff
 ppp_async_matan.diff
 block2mtd-yoush.diff
 gentoo-fsfixes.diff
+kexec.diff
 trig-keyb.diff
 twl-scrollock.diff
 squashfs.diff
@@ -22,8 +26,8 @@ mmcnames-fanoush.diff
 gethercharge.diff
 ondemand-avoid.diff
 overclock.diff
+#bq24150-sniff.diff
 armthumb.diff
-minstrel-aziwoqpa_fixed.diff
 wl12xx_rohar.diff
 fmtx.unlock.diff
 radio-bcm2048.diff
@@ -34,11 +38,10 @@ bt-mice.diff
 bq27x00_battery.diff
 l2cap_parent.diff
 wl12xx-rx-fix.diff
-vanilla-2.6.28-anti-io-stalling.diff
+anti-io-stalling.diff
 joikuspot.diff
 dspbridge.diff
 phys_to_page.diff
 ext4-data-corruption.diff
 patch_swap_notify_core_support_2.6.28.diff
 class10sd_dto14_fix.diff
-kexec.diff
diff --git a/kernel-power-2.6.28/debian/patches/vanilla-2.6.28-anti-io-stalling.diff b/kernel-power-2.6.28/debian/patches/vanilla-2.6.28-anti-io-stalling.diff
deleted file mode 100644
index 1eb82e3..0000000
+++ /dev/null
@@ -1,138 +0,0 @@
---- kernel-2.6.28/mm/vmscan.c.orig     2009-05-02 14:54:43.000000000 -0400
-+++ kernel-2.6.28/mm/vmscan.c  2010-11-11 12:06:49.955635002 -0500
-@@ -72,6 +72,12 @@ struct scan_control {
-       int order;
-+      /*
-+       * Intend to reclaim enough contiguous memory rather than just a
-+       * sufficient amount of memory, i.e. it's the mode for high-order allocation.
-+       */
-+      bool lumpy_reclaim_mode;
-+
-       /* Which cgroup do we reclaim from */
-       struct mem_cgroup *mem_cgroup;
-@@ -1024,6 +1030,47 @@ int isolate_lru_page(struct page *page)
- }
- /*
-+ * Returns true if the caller should wait to clean dirty/writeback pages.
-+ *
-+ * If we are direct reclaiming for contiguous pages and we do not reclaim
-+ * everything in the list, try again and wait for writeback IO to complete.
-+ * This will stall high-order allocations noticeably. Only do that when we
-+ * really need to free the pages under high memory pressure.
-+ */
-+static inline bool should_reclaim_stall(unsigned long nr_taken,
-+                                      unsigned long nr_freed,
-+                                      int priority,
-+                                      struct scan_control *sc)
-+{
-+      int lumpy_stall_priority;
-+
-+      /* kswapd should not stall on sync IO */
-+      if (current_is_kswapd())
-+              return false;
-+
-+      /* Only stall on lumpy reclaim */
-+      if (!sc->lumpy_reclaim_mode)
-+              return false;
-+
-+      /* If we have reclaimed everything on the isolated list, no stall */
-+      if (nr_freed == nr_taken)
-+              return false;
-+
-+      /*
-+       * For high-order allocations, there are two stall thresholds.
-+       * High-cost allocations stall immediately whereas lower
-+       * order allocations such as stacks require the scanning
-+       * priority to be much higher before stalling.
-+       */
-+      if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-+              lumpy_stall_priority = DEF_PRIORITY;
-+      else
-+              lumpy_stall_priority = DEF_PRIORITY / 3;
-+
-+      return priority <= lumpy_stall_priority;
-+}
-+
-+/*
-  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
-  * of reclaimed pages
-  */
-@@ -1047,7 +1094,7 @@ static unsigned long shrink_inactive_lis
-               unsigned long nr_freed;
-               unsigned long nr_active;
-               unsigned int count[NR_LRU_LISTS] = { 0, };
--              int mode = ISOLATE_INACTIVE;
-+// use lumpy  int mode = ISOLATE_INACTIVE;
-               /*
-                * If we need a large contiguous chunk of memory, or have
-@@ -1056,13 +1103,11 @@ static unsigned long shrink_inactive_lis
-                *
-                * We use the same threshold as pageout congestion_wait below.
-                */
--              if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
--                      mode = ISOLATE_BOTH;
--              else if (sc->order && priority < DEF_PRIORITY - 2)
--                      mode = ISOLATE_BOTH;
-               nr_taken = sc->isolate_pages(sc->swap_cluster_max,
--                           &page_list, &nr_scan, sc->order, mode,
-+                           &page_list, &nr_scan, sc->order, 
-+                              sc->lumpy_reclaim_mode ?
-+                                      ISOLATE_BOTH : ISOLATE_INACTIVE,
-                               zone, sc->mem_cgroup, 0, file);
-               nr_active = clear_active_flags(&page_list, count);
-               __count_vm_events(PGDEACTIVATE, nr_active);
-@@ -1088,16 +1133,8 @@ static unsigned long shrink_inactive_lis
-               nr_scanned += nr_scan;
-               nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
--              /*
--               * If we are direct reclaiming for contiguous pages and we do
--               * not reclaim everything in the list, try again and wait
--               * for IO to complete. This will stall high-order allocations
--               * but that should be acceptable to the caller
--               */
--              if (nr_freed < nr_taken && !current_is_kswapd() &&
--                                      sc->order > PAGE_ALLOC_COSTLY_ORDER) {
--                      congestion_wait(WRITE, HZ/10);
--
-+              /* Check if we should synchronously wait for writeback */
-+              if (should_reclaim_stall(nr_taken, nr_freed, priority, sc)) {
-                       /*
-                        * The attempt at page out may have made some
-                        * of the pages active, mark them inactive again.
-@@ -1404,6 +1441,20 @@ static void get_scan_ratio(struct zone *
-       percent[1] = 100 - percent[0];
- }
-+static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
-+{
-+      /*
-+      * If we need a large contiguous chunk of memory, or have
-+      * trouble getting a small set of contiguous pages, we
-+      * will reclaim both active and inactive pages.
-+      */
-+      if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-+              sc->lumpy_reclaim_mode = 1;
-+      else if (sc->order && priority < DEF_PRIORITY - 2)
-+              sc->lumpy_reclaim_mode = 1;
-+      else
-+              sc->lumpy_reclaim_mode = 0;
-+}
- /*
-  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
-@@ -1419,6 +1470,8 @@ static unsigned long shrink_zone(int pri
-       get_scan_ratio(zone, sc, percent);
-+      set_lumpy_reclaim_mode(priority, sc);
-+
-       for_each_evictable_lru(l) {
-               if (scan_global_lru(sc)) {
-                       int file = is_file_lru(l);
diff --git a/kernel-power-2.6.28/debian/usbehci.diff b/kernel-power-2.6.28/debian/usbehci.diff
deleted file mode 100644
index d5d51c6..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
---- rx51power_defconfig.orig   2010-05-20 00:18:55.000000000 +0200
-+++ rx51power_defconfig        2010-05-20 00:44:07.000000000 +0200
-@@ -1646,6 +1646,9 @@
- # USB Host Controller Drivers
- #
- # CONFIG_USB_C67X00_HCD is not set
-+CONFIG_USB_EHCI_HCD=y
-+# CONFIG_OMAP_EHCI_PHY_MODE is not set
-+CONFIG_OMAP_EHCI_TLL_MODE=y
- # CONFIG_USB_EHCI_HCD is not set
- # CONFIG_USB_ISP116X_HCD is not set
- # CONFIG_USB_OHCI_HCD is not set