Critical and minor fixes for -ck swappiness patches (make_swappiness_really_mean_it...
diff --git a/kernel-bfs-2.6.28/debian/patches/mm-make_swappiness_really_mean_it.patch b/kernel-bfs-2.6.28/debian/patches/mm-make_swappiness_really_mean_it.patch
index 16a2ac0..b251615 100644
--- a/kernel-bfs-2.6.28/debian/patches/mm-make_swappiness_really_mean_it.patch
+++ b/kernel-bfs-2.6.28/debian/patches/mm-make_swappiness_really_mean_it.patch
@@ -1,29 +1,44 @@
-Swappiness the tunable lies. It doesn't respect swappiness because it alters
-the value when we're more than lightly loaded in the vm. Change it to -really-
-mean swappiness unless we're about to go out of memory.
-
--ck
----
- mm/vmscan.c |    7 ++++++-
- 1 file changed, 6 insertions(+), 1 deletion(-)
-
-Index: linux-2.6.34-ck1/mm/vmscan.c
-===================================================================
---- linux-2.6.34-ck1.orig/mm/vmscan.c  2010-05-18 12:24:33.974319780 +1000
-+++ linux-2.6.34-ck1/mm/vmscan.c       2010-05-18 12:26:16.233444880 +1000
-@@ -1633,6 +1633,7 @@ static void shrink_zone(int priority, st
+--- linux-2.6.28/mm/vmscan.c   2008-12-25 00:26:37.000000000 +0100
++++ linux-2.6.28.new/mm/vmscan.c       2011-06-02 13:51:01.615325087 +0200
+@@ -1342,13 +1342,6 @@ static void get_scan_ratio(struct zone *
+       file  = zone_page_state(zone, NR_ACTIVE_FILE) +
+               zone_page_state(zone, NR_INACTIVE_FILE);
+       free  = zone_page_state(zone, NR_FREE_PAGES);
+-      /* If we have no swap space, do not bother scanning anon pages. */
+-      if (nr_swap_pages <= 0) {
+-              percent[0] = 0;
+-              percent[1] = 100;
+-              return;
+-      }
+-
+       /* If we have very few page cache pages, force-scan anon pages. */
+       if (unlikely(file + free <= zone->pages_high)) {
+               percent[0] = 100;
+@@ -1416,8 +1409,16 @@ static unsigned long shrink_zone(int pri
        unsigned long nr_reclaimed = 0;
        unsigned long percent[2];       /* anon @ 0; file @ 1 */
        enum lru_list l;
 +      int tmp_priority;
++      int noswap = 0;
  
-       get_scan_ratio(zone, sc, percent);
+-      get_scan_ratio(zone, sc, percent);
++      /* If we have no swap space, do not bother scanning anon pages. */
++      if (!sc->may_swap || (nr_swap_pages <= 0)) {
++              noswap = 1;
++              percent[0] = 0;
++              percent[1] = 100;
++      } else
++              get_scan_ratio(zone, sc, percent);
  
-@@ -1648,7 +1649,11 @@ static void shrink_zone(int priority, st
+       for_each_evictable_lru(l) {
+               if (scan_global_lru(sc)) {
+@@ -1425,8 +1426,12 @@ static unsigned long shrink_zone(int pri
+                       int scan;
  
                        scan = zone_page_state(zone, NR_LRU_BASE + l);
-                       if (priority) {
+-                      if (priority) {
 -                              scan >>= priority;
++                      if (priority || noswap) {
 +                              tmp_priority = priority;
 +
 +                              if (file && priority > 0)
@@ -32,4 +47,3 @@ Index: linux-2.6.34-ck1/mm/vmscan.c
                                scan = (scan * percent[file]) / 100;
                        }
                        zone->lru[l].nr_scan += scan;
-
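
For context, the net effect of the revised hunks above is that shrink_zone() now decides up front whether swap may be used (checking sc->may_swap as well as nr_swap_pages) and records that in a noswap flag, instead of get_scan_ratio() silently returning a 0/100 split, so the anon/file percentages are applied to every LRU list even in the no-swap case. The stand-alone C sketch below only models that control flow under made-up numbers: struct scan_control, the stub get_scan_ratio(), DEF-style priority value and the page counts are simplified stand-ins rather than the kernel definitions, and the tmp_priority adjustment the hunk also introduces is elided in the diff and therefore omitted here.

/*
 * Minimal userspace sketch of the patched shrink_zone() flow:
 * pick a noswap flag and the anon/file split first, then apply
 * the split to each LRU. Illustrative values only.
 */
#include <stdio.h>

struct scan_control { int may_swap; };

static long nr_swap_pages;              /* pretend global swap page count */

/* Stand-in for the kernel's swappiness-based ratio calculation. */
static void get_scan_ratio(unsigned long percent[2])
{
        percent[0] = 40;                /* anon */
        percent[1] = 60;                /* file */
}

static unsigned long scan_for_lru(unsigned long lru_pages, int priority,
                                  int noswap, int file,
                                  const unsigned long percent[2])
{
        unsigned long scan = lru_pages;

        /* Mirrors the "if (priority || noswap)" guard from the hunk. */
        if (priority || noswap) {
                scan >>= priority;
                scan = (scan * percent[file]) / 100;
        }
        return scan;
}

static void shrink_zone_sketch(struct scan_control *sc, int priority,
                               unsigned long anon_lru, unsigned long file_lru)
{
        unsigned long percent[2];       /* anon @ 0; file @ 1 */
        int noswap = 0;

        /* Replacement for get_scan_ratio()'s old early return. */
        if (!sc->may_swap || nr_swap_pages <= 0) {
                noswap = 1;
                percent[0] = 0;
                percent[1] = 100;
        } else
                get_scan_ratio(percent);

        printf("priority %d: scan %lu anon, %lu file pages\n", priority,
               scan_for_lru(anon_lru, priority, noswap, 0, percent),
               scan_for_lru(file_lru, priority, noswap, 1, percent));
}

int main(void)
{
        struct scan_control sc = { .may_swap = 1 };

        nr_swap_pages = 65536;          /* swap present */
        shrink_zone_sketch(&sc, 6, 80000, 120000);

        nr_swap_pages = 0;              /* swap exhausted or absent */
        shrink_zone_sketch(&sc, 6, 80000, 120000);
        return 0;
}

Built with a plain gcc, the second call shows the anon scan target dropping to zero once swap is unavailable; because the guard is now "priority || noswap" rather than just "if (priority)", that also holds at priority 0, which the pre-patch code did not guarantee.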