Added patches from kernel-power bug tracker:
author    Pali Rohár <pali.rohar@gmail.com>
Sat, 16 Apr 2011 00:38:45 +0000 (02:38 +0200)
committer Pali Rohár <pali.rohar@gmail.com>
Sat, 16 Apr 2011 00:38:45 +0000 (02:38 +0200)
vanilla-2.6.28-anti-io-stalling.patch wl12xx-rx-fix.diff

kernel-power-2.6.28/debian/patches/series
kernel-power-2.6.28/debian/patches/vanilla-2.6.28-anti-io-stalling.patch [new file with mode: 0644]
kernel-power-2.6.28/debian/patches/wl12xx-rx-fix.diff [new file with mode: 0644]

diff --git a/kernel-power-2.6.28/debian/patches/series b/kernel-power-2.6.28/debian/patches/series
index 4519f76..c0f5079 100644
--- a/kernel-power-2.6.28/debian/patches/series
+++ b/kernel-power-2.6.28/debian/patches/series
@@ -31,3 +31,6 @@ usbhostmode.diff
 bt-mice.diff
 board-rx51-peripherals.patch
 bq27x00_battery.patch
+l2cap_parent.patch
+wl12xx-rx-fix.diff
+vanilla-2.6.28-anti-io-stalling.patch
diff --git a/kernel-power-2.6.28/debian/patches/vanilla-2.6.28-anti-io-stalling.patch b/kernel-power-2.6.28/debian/patches/vanilla-2.6.28-anti-io-stalling.patch
new file mode 100644
index 0000000..1eb82e3
--- /dev/null
+++ b/kernel-power-2.6.28/debian/patches/vanilla-2.6.28-anti-io-stalling.patch
@@ -0,0 +1,138 @@
+--- kernel-2.6.28/mm/vmscan.c.orig     2009-05-02 14:54:43.000000000 -0400
++++ kernel-2.6.28/mm/vmscan.c  2010-11-11 12:06:49.955635002 -0500
+@@ -72,6 +72,12 @@ struct scan_control {
+       int order;
++      /*
++       * Intend to reclaim enough contiguous memory rather than just enough
++       * total memory; i.e., it's the mode for high-order allocation.
++       */
++      bool lumpy_reclaim_mode;
++
+       /* Which cgroup do we reclaim from */
+       struct mem_cgroup *mem_cgroup;
+@@ -1024,6 +1030,47 @@ int isolate_lru_page(struct page *page)
+ }
+ /*
++ * Returns true if the caller should wait to clean dirty/writeback pages.
++ *
++ * If we are direct reclaiming for contiguous pages and we do not reclaim
++ * everything in the list, try again and wait for writeback IO to complete.
++ * This will stall high-order allocations noticeably. Only do that when we
++ * really need to free the pages under high memory pressure.
++ */
++static inline bool should_reclaim_stall(unsigned long nr_taken,
++                                      unsigned long nr_freed,
++                                      int priority,
++                                      struct scan_control *sc)
++{
++      int lumpy_stall_priority;
++
++      /* kswapd should not stall on sync IO */
++      if (current_is_kswapd())
++              return false;
++
++      /* Only stall on lumpy reclaim */
++      if (!sc->lumpy_reclaim_mode)
++              return false;
++
++      /* If we have reclaimed everything on the isolated list, no stall */
++      if (nr_freed == nr_taken)
++              return false;
++
++      /*
++       * For high-order allocations, there are two stall thresholds.
++       * High-cost allocations stall immediately whereas lower
++       * order allocations such as stacks require the scanning
++       * priority to be much higher before stalling.
++       */
++      if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
++              lumpy_stall_priority = DEF_PRIORITY;
++      else
++              lumpy_stall_priority = DEF_PRIORITY / 3;
++
++      return priority <= lumpy_stall_priority;
++}
++
++/*
+  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
+  * of reclaimed pages
+  */
+@@ -1047,7 +1094,7 @@ static unsigned long shrink_inactive_lis
+               unsigned long nr_freed;
+               unsigned long nr_active;
+               unsigned int count[NR_LRU_LISTS] = { 0, };
+-              int mode = ISOLATE_INACTIVE;
++// use lumpy  int mode = ISOLATE_INACTIVE;
+               /*
+                * If we need a large contiguous chunk of memory, or have
+@@ -1056,13 +1103,11 @@ static unsigned long shrink_inactive_lis
+                *
+                * We use the same threshold as pageout congestion_wait below.
+                */
+-              if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+-                      mode = ISOLATE_BOTH;
+-              else if (sc->order && priority < DEF_PRIORITY - 2)
+-                      mode = ISOLATE_BOTH;
+               nr_taken = sc->isolate_pages(sc->swap_cluster_max,
+-                           &page_list, &nr_scan, sc->order, mode,
++                           &page_list, &nr_scan, sc->order, 
++                              sc->lumpy_reclaim_mode ?
++                                      ISOLATE_BOTH : ISOLATE_INACTIVE,
+                               zone, sc->mem_cgroup, 0, file);
+               nr_active = clear_active_flags(&page_list, count);
+               __count_vm_events(PGDEACTIVATE, nr_active);
+@@ -1088,16 +1133,8 @@ static unsigned long shrink_inactive_lis
+               nr_scanned += nr_scan;
+               nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
+-              /*
+-               * If we are direct reclaiming for contiguous pages and we do
+-               * not reclaim everything in the list, try again and wait
+-               * for IO to complete. This will stall high-order allocations
+-               * but that should be acceptable to the caller
+-               */
+-              if (nr_freed < nr_taken && !current_is_kswapd() &&
+-                                      sc->order > PAGE_ALLOC_COSTLY_ORDER) {
+-                      congestion_wait(WRITE, HZ/10);
+-
++              /* Check if we should synchronously wait for writeback */
++              if (should_reclaim_stall(nr_taken, nr_freed, priority, sc)) {
+                       /*
+                        * The attempt at page out may have made some
+                        * of the pages active, mark them inactive again.
+@@ -1404,6 +1441,20 @@ static void get_scan_ratio(struct zone *
+       percent[1] = 100 - percent[0];
+ }
++static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
++{
++      /*
++       * If we need a large contiguous chunk of memory, or have
++       * trouble getting a small set of contiguous pages, we
++       * will reclaim both active and inactive pages.
++       */
++      if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
++              sc->lumpy_reclaim_mode = 1;
++      else if (sc->order && priority < DEF_PRIORITY - 2)
++              sc->lumpy_reclaim_mode = 1;
++      else
++              sc->lumpy_reclaim_mode = 0;
++}
+ /*
+  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
+@@ -1419,6 +1470,8 @@ static unsigned long shrink_zone(int pri
+       get_scan_ratio(zone, sc, percent);
++      set_lumpy_reclaim_mode(priority, sc);
++
+       for_each_evictable_lru(l) {
+               if (scan_global_lru(sc)) {
+                       int file = is_file_lru(l);
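
For reference, the stall thresholds introduced above work out as follows. The standalone sketch below is an illustration only, not part of the patch: stalls_at() is a made-up helper, and the two macro values are assumed to match 2.6.28 (DEF_PRIORITY == 12, PAGE_ALLOC_COSTLY_ORDER == 3). Allocations above the costly order may stall at any scan priority, while smaller high-order allocations stall only once the priority has dropped to DEF_PRIORITY / 3, i.e. 4, or below.

#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY            12  /* assumed value, as in 2.6.28 */
#define PAGE_ALLOC_COSTLY_ORDER 3   /* assumed value, as in 2.6.28 */

/* Mirrors the threshold logic of should_reclaim_stall() above. */
static bool stalls_at(int order, int priority)
{
        int lumpy_stall_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
                                   DEF_PRIORITY : DEF_PRIORITY / 3;
        return priority <= lumpy_stall_priority;
}

int main(void)
{
        printf("order 5, priority 12: %d\n", stalls_at(5, 12)); /* 1: costly order stalls at once */
        printf("order 1, priority 12: %d\n", stalls_at(1, 12)); /* 0: not yet */
        printf("order 1, priority 4:  %d\n", stalls_at(1, 4));  /* 1: stalls once priority <= 4 */
        return 0;
}
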
diff --git a/kernel-power-2.6.28/debian/patches/wl12xx-rx-fix.diff b/kernel-power-2.6.28/debian/patches/wl12xx-rx-fix.diff
new file mode 100644
index 0000000..2107415
--- /dev/null
+++ b/kernel-power-2.6.28/debian/patches/wl12xx-rx-fix.diff
@@ -0,0 +1,14 @@
+diff -ru a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
+--- a/drivers/net/wireless/wl12xx/wl1251_rx.c  2011-01-22 17:33:06.966780985 +1100
++++ b/drivers/net/wireless/wl12xx/wl1251_rx.c  2011-01-22 17:17:36.992422987 +1100
+@@ -185,8 +185,8 @@
+       rx_buffer = skb_put(skb, length);
+       wl1251_spi_mem_read(wl, rx_packet_ring_addr, rx_buffer, length);
+-      /* The actual lenght doesn't include the target's alignment */
+-      skb->len = desc->length  - PLCP_HEADER_LENGTH;
++      /* The actual length doesn't include the target's alignment */
++      skb_trim(skb, desc->length - PLCP_HEADER_LENGTH);
+       fc = (u16 *)skb->data;
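
For context, a minimal sketch of what the skb_trim() change buys (illustration only, not part of the patch; the helper name below is hypothetical): assigning skb->len directly shortens the recorded length but leaves skb->tail where skb_put() advanced it, whereas skb_trim() shrinks the length and moves the tail pointer back in step, keeping the linear buffer self-consistent for later users.

#include <linux/skbuff.h>

/* Hypothetical helper, for illustration only (kernel context). */
static void wl1251_rx_set_real_length(struct sk_buff *skb, unsigned int real_len)
{
        /*
         * Old code:  skb->len = real_len;
         *            -- only the length field changed; skb->tail went stale.
         * New code:  skb_trim() updates skb->len and the tail pointer together
         *            (and is a no-op if real_len >= skb->len).
         */
        skb_trim(skb, real_len);
}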