Critical and minor fixes for -ck swappiness patches (make_swappiness_really_mean_it...
author     Peter Hunt <peter_j_hunt@hotmail.com>  Thu, 2 Jun 2011 14:25:50 +0000
committer  Peter Hunt <peter_j_hunt@hotmail.com>  Thu, 2 Jun 2011 14:25:50 +0000
kernel-bfs-2.6.28/debian/patches/mm-drop_swap_cache_aggressively.patch
kernel-bfs-2.6.28/debian/patches/mm-make_swappiness_really_mean_it.patch
kernel-bfs-2.6.28/debian/patches/mm-zero_swappiness.patch [new file with mode: 0644]
kernel-bfs-2.6.28/debian/patches/series
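
These patches rebase Con Kolivas' swappiness tweaks from their 2.6.34-ck1 form
onto the 2.6.28 kernel-bfs tree (the old hunks reference linux-2.6.34-ck1, the
refreshed ones linux-2.6.28). Several hunks gate behaviour on the
vm_swap_full() test from include/linux/swap.h, which declares swap "full" once
more than half of it is in use. A worked example of that arithmetic, with the
macro copied from the hunk below and purely illustrative sample numbers:

    /* vm_swap_full() is true once over 50% of swap is in use.
     * Example: total_swap_pages = 262144 (a 1 GiB swap of 4 KiB pages)
     * and nr_swap_pages = 100000 slots still free:
     *   100000 * 2 = 200000 < 262144  ->  true, swap is >50% used.
     */
    #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)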

diff --git a/kernel-bfs-2.6.28/debian/patches/mm-drop_swap_cache_aggressively.patch b/kernel-bfs-2.6.28/debian/patches/mm-drop_swap_cache_aggressively.patch
index 6443346..9cb60df 100644
--- a/kernel-bfs-2.6.28/debian/patches/mm-drop_swap_cache_aggressively.patch
+++ b/kernel-bfs-2.6.28/debian/patches/mm-drop_swap_cache_aggressively.patch
@@ -1,21 +1,17 @@
-While it may be nice to have a copy of pages on swap once written there, the
-more garbage we leave in the swapspace the slower any further writes and
-reads to and from it are. Just free swapcache whenever we can.
-
--ck
-
----
- include/linux/swap.h |    2 +-
- mm/memory.c          |    2 +-
- mm/swapfile.c        |    9 ++++-----
- mm/vmscan.c          |    2 +-
- 4 files changed, 7 insertions(+), 8 deletions(-)
-
-Index: linux-2.6.34-ck1/mm/memory.c
-===================================================================
---- linux-2.6.34-ck1.orig/mm/memory.c  2010-05-18 12:24:33.852194874 +1000
-+++ linux-2.6.34-ck1/mm/memory.c       2010-05-18 12:26:16.646319673 +1000
-@@ -2713,7 +2713,7 @@ static int do_swap_page(struct mm_struct
+--- linux-2.6.28/include/linux/swap.h  2011-06-02 13:58:40.469430998 +0200
++++ linux-2.6.28.new/include/linux/swap.h      2011-06-02 14:01:57.427730997 +0200
+@@ -186,7 +186,7 @@ struct swap_list_t {
+       int next;       /* swapfile to be used next */
+ };
+ 
+-/* Swap 50% full? Release swapcache more aggressively.. */
++/* Swap 50% full? */
+ #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
+ 
+ /* linux/mm/page_alloc.c */
+--- linux-2.6.28/mm/memory.c   2011-06-02 13:58:36.577820199 +0200
++++ linux-2.6.28.new/mm/memory.c       2011-06-02 14:01:57.423731396 +0200
+@@ -2356,7 +2356,7 @@ static int do_swap_page(struct mm_struct
        page_add_anon_rmap(page, vma, address);
  
        swap_free(entry);
@@ -24,11 +20,18 @@ Index: linux-2.6.34-ck1/mm/memory.c
                remove_exclusive_swap_page(page);
        unlock_page(page);
  
-Index: linux-2.6.34-ck1/mm/swapfile.c
-===================================================================
-@@ -712,8 +712,7 @@ int free_swap_and_cache(swp_entry_t entr
-               one_user = (page_count(page) == 2);
-               /* Only cache user (+us), or swap space full? Free it! */
+--- linux-2.6.28/mm/swapfile.c 2011-06-02 13:58:36.581819801 +0200
++++ linux-2.6.28.new/mm/swapfile.c     2011-06-02 14:03:09.848109234 +0200
+@@ -449,14 +449,9 @@ void free_swap_and_cache(swp_entry_t ent
+               spin_unlock(&swap_lock);
+       }
+       if (page) {
+-              int one_user;
+-
+-              BUG_ON(PagePrivate(page));
+-              one_user = (page_count(page) == 2);
+-              /* Only cache user (+us), or swap space full? Free it! */
++              /* Not mapped elsewhere, or swap space full? Free it! */
                /* Also recheck PageSwapCache after page is locked (above) */
 -              if (PageSwapCache(page) && !PageWriteback(page) &&
 -                                      (one_user || vm_swap_full())) {
@@ -36,11 +39,9 @@ Index: linux-2.6.34-ck1/mm/swapfile.c
                        delete_from_swap_cache(page);
                        SetPageDirty(page);
                }
-Index: linux-2.6.34-ck1/mm/vmscan.c
-===================================================================
---- linux-2.6.34-ck1.orig/mm/vmscan.c  2010-05-18 12:26:16.371569589 +1000
-+++ linux-2.6.34-ck1/mm/vmscan.c       2010-05-18 12:26:16.647319427 +1000
-@@ -821,7 +821,7 @@ cull_mlocked:
+--- linux-2.6.28/mm/vmscan.c   2011-06-02 13:58:40.341443799 +0200
++++ linux-2.6.28.new/mm/vmscan.c       2011-06-02 14:05:27.848682722 +0200
+@@ -758,7 +758,7 @@ cull_mlocked:
  
  activate_locked:
                /* Not a candidate for swapping, so reclaim swap space. */
@@ -49,17 +50,14 @@ Index: linux-2.6.34-ck1/mm/vmscan.c
                        remove_exclusive_swap_page_ref(page);
                VM_BUG_ON(PageActive(page));
                SetPageActive(page);
-Index: linux-2.6.34-ck1/include/linux/swap.h
-===================================================================
---- linux-2.6.34-ck1.orig/include/linux/swap.h 2010-05-18 12:26:16.508569731 +1000
-+++ linux-2.6.34-ck1/include/linux/swap.h      2010-05-18 12:26:16.647319427 +1000
-@@ -189,7 +189,7 @@ struct swap_list_t {
-       int next;       /* swapfile to be used next */
- };
+@@ -1296,9 +1296,7 @@ static void shrink_active_list(unsigned 
+       __count_zone_vm_events(PGREFILL, zone, pgscanned);
+       __count_vm_events(PGDEACTIVATE, pgdeactivate);
+       spin_unlock_irq(&zone->lru_lock);
+-      if (vm_swap_full())
+-              pagevec_swap_free(&pvec);
+-
++      pagevec_swap_free(&pvec);
+       pagevec_release(&pvec);
+ }
  
--/* Swap 50% full? Release swapcache more aggressively.. */
-+/* Swap 50% full? */
- #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
- /* linux/mm/page_alloc.c */
-
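
The shrink_active_list hunk above now calls pagevec_swap_free() on every aging
pass instead of only when vm_swap_full() is true. For reference, this is the
helper being invoked, quoted from memory from the stock 2.6.28 mm/swap.c, so
treat it as a sketch rather than an authoritative copy:

    void pagevec_swap_free(struct pagevec *pvec)
    {
            int i;

            for (i = 0; i < pagevec_count(pvec); i++) {
                    struct page *page = pvec->pages[i];

                    /* Drop the swapcache copy of any page we can lock;
                     * the swap slot is freed once no other user remains. */
                    if (PageSwapCache(page) && trylock_page(page)) {
                            if (PageSwapCache(page))
                                    remove_exclusive_swap_page_ref(page);
                            unlock_page(page);
                    }
            }
    }
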
diff --git a/kernel-bfs-2.6.28/debian/patches/mm-make_swappiness_really_mean_it.patch b/kernel-bfs-2.6.28/debian/patches/mm-make_swappiness_really_mean_it.patch
index 16a2ac0..b251615 100644
--- a/kernel-bfs-2.6.28/debian/patches/mm-make_swappiness_really_mean_it.patch
+++ b/kernel-bfs-2.6.28/debian/patches/mm-make_swappiness_really_mean_it.patch
@@ -1,29 +1,44 @@
-Swappiness the tunable lies. It doesn't respect swappiness because it alters
-the value when we're more than lightly loaded in the vm. Change it to -really-
-mean swappiness unless we're about to go out of memory.
-
--ck
----
- mm/vmscan.c |    7 ++++++-
- 1 file changed, 6 insertions(+), 1 deletion(-)
-
-Index: linux-2.6.34-ck1/mm/vmscan.c
-===================================================================
---- linux-2.6.34-ck1.orig/mm/vmscan.c  2010-05-18 12:24:33.974319780 +1000
-+++ linux-2.6.34-ck1/mm/vmscan.c       2010-05-18 12:26:16.233444880 +1000
-@@ -1633,6 +1633,7 @@ static void shrink_zone(int priority, st
+--- linux-2.6.28/mm/vmscan.c   2008-12-25 00:26:37.000000000 +0100
++++ linux-2.6.28.new/mm/vmscan.c       2011-06-02 13:51:01.615325087 +0200
+@@ -1342,13 +1342,6 @@ static void get_scan_ratio(struct zone *
+               zone_page_state(zone, NR_INACTIVE_FILE);
+      free  = zone_page_state(zone, NR_FREE_PAGES);
+ 
+-      /* If we have no swap space, do not bother scanning anon pages. */
+-      if (nr_swap_pages <= 0) {
+-              percent[0] = 0;
+-              percent[1] = 100;
+-              return;
+-      }
+-
+       /* If we have very few page cache pages, force-scan anon pages. */
+       if (unlikely(file + free <= zone->pages_high)) {
+               percent[0] = 100;
+@@ -1416,8 +1409,16 @@ static unsigned long shrink_zone(int pri
        unsigned long nr_reclaimed = 0;
        unsigned long percent[2];       /* anon @ 0; file @ 1 */
        enum lru_list l;
 +      int tmp_priority;
++      int noswap = 0;
  
-       get_scan_ratio(zone, sc, percent);
+-      get_scan_ratio(zone, sc, percent);
++      /* If we have no swap space, do not bother scanning anon pages. */
++      if (!sc->may_swap || (nr_swap_pages <= 0)) {
++              noswap = 1;
++              percent[0] = 0;
++              percent[1] = 100;
++      } else
++              get_scan_ratio(zone, sc, percent);
  
-@@ -1648,7 +1649,11 @@ static void shrink_zone(int priority, st
+       for_each_evictable_lru(l) {
+               if (scan_global_lru(sc)) {
+@@ -1425,8 +1426,12 @@ static unsigned long shrink_zone(int pri
+                       int scan;
  
                        scan = zone_page_state(zone, NR_LRU_BASE + l);
-                       if (priority) {
+-                      if (priority) {
 -                              scan >>= priority;
++                      if (priority || noswap) {
 +                              tmp_priority = priority;
 +
 +                              if (file && priority > 0)
@@ -32,4 +47,3 @@ Index: linux-2.6.34-ck1/mm/vmscan.c
                                scan = (scan * percent[file]) / 100;
                        }
                        zone->lru[l].nr_scan += scan;
-
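
The arithmetic visible in the hunk above decides how many pages each LRU list
is asked to scan per pass; the -ck tmp_priority branch is cut off by the diff
context, so this traces only the stock path, with illustrative sample numbers:

    /* File LRU of 1,048,576 pages, priority 12, percent[file] = 65:
     *   scan = 1048576 >> 12       ->  256 pages
     *   scan = (256 * 65) / 100    ->  166 pages queued this pass
     * In the patched version, "if (priority || noswap)" enters this
     * scaling even at priority 0 when noswap is set; percent[0] = 0
     * then yields scan = 0, so anon lists are never touched without
     * usable swap. */
    scan = zone_page_state(zone, NR_LRU_BASE + l);
    scan >>= priority;
    scan = (scan * percent[file]) / 100;
    zone->lru[l].nr_scan += scan;
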
diff --git a/kernel-bfs-2.6.28/debian/patches/mm-zero_swappiness.patch b/kernel-bfs-2.6.28/debian/patches/mm-zero_swappiness.patch
new file mode 100644
index 0000000..cd321fb
--- /dev/null
+++ b/kernel-bfs-2.6.28/debian/patches/mm-zero_swappiness.patch
@@ -0,0 +1,22 @@
+Yet to see a desktop workload that benefits from swappiness being higher than
+zero. Make it so by default.
+
+-ck
+
+---
+ mm/vmscan.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-2.6.34-ck1/mm/vmscan.c
+===================================================================
+--- linux-2.6.34-ck1.orig/mm/vmscan.c  2010-05-18 12:26:16.233444880 +1000
++++ linux-2.6.34-ck1/mm/vmscan.c       2010-05-18 12:26:16.371569589 +1000
+@@ -126,7 +126,7 @@ struct scan_control {
+ /*
+  * From 0 .. 100.  Higher means more swappy.
+  */
+-int vm_swappiness = 60;
++int vm_swappiness;
+ long vm_total_pages;  /* The total number of pages which the VM controls */
+ 
+ static LIST_HEAD(shrinker_list);
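
Zeroing the compiled-in default does not remove the tunable: vm_swappiness
stays a writable sysctl clamped to 0..100, so a user who disagrees can still
write, say, 60 to /proc/sys/vm/swappiness at runtime. The stock registration
in kernel/sysctl.c looks roughly like this (quoted from memory; the field
values are an assumption, not part of this commit):

    {
            .ctl_name       = VM_SWAPPINESS,
            .procname       = "swappiness",
            .data           = &vm_swappiness,
            .maxlen         = sizeof(vm_swappiness),
            .mode           = 0644,
            .proc_handler   = &proc_dointvec_minmax,
            .strategy       = &sysctl_intvec,
            .extra1         = &zero,         /* lower bound 0 */
            .extra2         = &one_hundred,  /* upper bound 100 */
    },
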
diff --git a/kernel-bfs-2.6.28/debian/patches/series b/kernel-bfs-2.6.28/debian/patches/series
index 90fc740..3f2a6b1 100644
--- a/kernel-bfs-2.6.28/debian/patches/series
+++ b/kernel-bfs-2.6.28/debian/patches/series
@@ -37,12 +37,13 @@ bfs-401-to-404.patch
 sched-latnice.patch
 sched-add-above-background-load-function.patch
 mm-make_swappiness_really_mean_it.patch
+mm-zero_swappiness.patch
 mm-enable_swaptoken_only_when_swap_full.patch
 mm-drop_swap_cache_aggressively.patch
 mm-lots_watermark.diff
 mm-kswapd_inherit_prio-1.patch
-mm-idleprio_prio-1.patch
 mm-background_scan-2.patch
+mm-idleprio_prio-1.patch
 mm-lru_cache_add_lru_tail-1.patch
 hz-raise_max.patch
 voltage_scaling_1.diff
@@ -67,4 +68,3 @@ reiser4-for-2.6.28.patch
 reiser4-2.6.28.1-fix.patch
 ext4.diff
 class10sd_dto14_fix.diff
-