-While it may be nice to keep a copy of pages in swap once they have been
-written there, the more garbage we leave in swap space, the slower any further
-reads and writes to it become. Just free swapcache whenever we can.
-
--ck
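
For clarity, here is a minimal userspace model of the gating logic this patch
removes; it is a sketch, not the patched kernel code. The struct and helpers
are stand-ins for PageSwapCache()/PageWriteback(), the one_user test
(page_count(page) == 2) and vm_swap_full(), which per the swap.h context below
means more than half of swap is in use (nr_swap_pages*2 < total_swap_pages).

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the page state consulted in
 * free_swap_and_cache(); not the kernel's own types. */
struct page_model {
	bool swapcache;  /* PageSwapCache(page) */
	bool writeback;  /* PageWriteback(page) */
	bool one_user;   /* page_count(page) == 2: swapcache plus us */
};

/* vm_swap_full(): swap counts as "full" once more than half used. */
static bool swap_full(long free_pages, long total_pages)
{
	return free_pages * 2 < total_pages;
}

/* Old policy: drop the swapcache copy only when we are the last user
 * or swap is already more than half full. */
static bool drop_old(const struct page_model *p, bool full)
{
	return p->swapcache && !p->writeback && (p->one_user || full);
}

/* New policy: drop it whenever it is safe to do so, so stale copies
 * never pile up in swap. */
static bool drop_new(const struct page_model *p)
{
	return p->swapcache && !p->writeback;
}

int main(void)
{
	struct page_model p = { true, false, false };

	/* Lightly used swap: the old policy keeps the stale copy, the
	 * new one frees the slot immediately. */
	printf("old=%d new=%d\n", drop_old(&p, swap_full(900, 1000)),
	       drop_new(&p));
	return 0;
}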
-
----
- include/linux/swap.h | 2 +-
- mm/memory.c | 2 +-
- mm/swapfile.c | 9 ++++-----
- mm/vmscan.c | 2 +-
- 4 files changed, 7 insertions(+), 8 deletions(-)
-
-Index: linux-2.6.34-ck1/mm/memory.c
-===================================================================
---- linux-2.6.34-ck1.orig/mm/memory.c 2010-05-18 12:24:33.852194874 +1000
-+++ linux-2.6.34-ck1/mm/memory.c 2010-05-18 12:26:16.646319673 +1000
-@@ -2713,7 +2713,7 @@ static int do_swap_page(struct mm_struct
+--- linux-2.6.28/include/linux/swap.h 2011-06-02 13:58:40.469430998 +0200
++++ linux-2.6.28.new/include/linux/swap.h 2011-06-02 14:01:57.427730997 +0200
+@@ -186,7 +186,7 @@ struct swap_list_t {
+ int next; /* swapfile to be used next */
+ };
+
+-/* Swap 50% full? Release swapcache more aggressively.. */
++/* Swap 50% full? */
+ #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
+
+ /* linux/mm/page_alloc.c */
+--- linux-2.6.28/mm/memory.c 2011-06-02 13:58:36.577820199 +0200
++++ linux-2.6.28.new/mm/memory.c 2011-06-02 14:01:57.423731396 +0200
+@@ -2356,7 +2356,7 @@ static int do_swap_page(struct mm_struct
page_add_anon_rmap(page, vma, address);
swap_free(entry);
+- if (vm_swap_full())
+- remove_exclusive_swap_page(page);
++ remove_exclusive_swap_page(page);
unlock_page(page);
-Index: linux-2.6.34-ck1/mm/swapfile.c
-===================================================================
-@@ -712,8 +712,7 @@ int free_swap_and_cache(swp_entry_t entr
+--- linux-2.6.28/mm/swapfile.c 2011-06-02 13:58:36.581819801 +0200
++++ linux-2.6.28.new/mm/swapfile.c 2011-06-02 14:03:09.848109234 +0200
+@@ -449,14 +449,9 @@ void free_swap_and_cache(swp_entry_t ent
+ spin_unlock(&swap_lock);
+ }
+ if (page) {
+- int one_user;
+-
+- BUG_ON(PagePrivate(page));
+- one_user = (page_count(page) == 2);
+- /* Only cache user (+us), or swap space full? Free it! */
++ /* Not mapped elsewhere, or swap space full? Free it! */
/* Also recheck PageSwapCache after page is locked (above) */
+- if (PageSwapCache(page) && !PageWriteback(page) &&
+- (one_user || vm_swap_full())) {
++ if (PageSwapCache(page) && !PageWriteback(page)) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
-Index: linux-2.6.34-ck1/mm/vmscan.c
-===================================================================
---- linux-2.6.34-ck1.orig/mm/vmscan.c 2010-05-18 12:26:16.371569589 +1000
-+++ linux-2.6.34-ck1/mm/vmscan.c 2010-05-18 12:26:16.647319427 +1000
-@@ -821,7 +821,7 @@ cull_mlocked:
+--- linux-2.6.28/mm/vmscan.c 2011-06-02 13:58:40.341443799 +0200
++++ linux-2.6.28.new/mm/vmscan.c 2011-06-02 14:05:27.848682722 +0200
+@@ -758,7 +758,7 @@ cull_mlocked:
activate_locked:
/* Not a candidate for swapping, so reclaim swap space. */
+- if (PageSwapCache(page) && vm_swap_full())
++ if (PageSwapCache(page))
+ remove_exclusive_swap_page_ref(page);
VM_BUG_ON(PageActive(page));
SetPageActive(page);
+@@ -1296,9 +1296,7 @@ static void shrink_active_list(unsigned
+ __count_zone_vm_events(PGREFILL, zone, pgscanned);
+ __count_vm_events(PGDEACTIVATE, pgdeactivate);
+ spin_unlock_irq(&zone->lru_lock);
+- if (vm_swap_full())
+- pagevec_swap_free(&pvec);
+-
++ pagevec_swap_free(&pvec);
+ pagevec_release(&pvec);
+ }
-Index: linux-2.6.34-ck1/include/linux/swap.h
-===================================================================
--- linux-2.6.34-ck1.orig/include/linux/swap.h 2010-05-18 12:26:16.508569731 +1000
-+++ linux-2.6.34-ck1/include/linux/swap.h 2010-05-18 12:26:16.647319427 +1000
-@@ -189,7 +189,7 @@ struct swap_list_t {
- int next; /* swapfile to be used next */
- };
-
--/* Swap 50% full? Release swapcache more aggressively.. */
-+/* Swap 50% full? */
- #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
-
- /* linux/mm/page_alloc.c */
-
-The swappiness tunable lies. The VM doesn't actually respect it, because it
-alters the effective value whenever we're more than lightly loaded. Change it
-to -really- mean swappiness unless we're about to go out of memory.
-
--ck
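
A minimal userspace model of the control-flow change to shrink_zone() below;
the names are stand-ins for the kernel's, and the ratio helper is reduced to a
direct swappiness split purely for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for get_scan_ratio(): report the anon/file split that
 * swappiness asks for, with no special cases of its own. */
static void scan_ratio_model(int swappiness, int percent[2])
{
	percent[0] = swappiness;        /* anon share */
	percent[1] = 100 - swappiness;  /* file share */
}

/* Patched flow: the "no usable swap" case is decided by the caller,
 * honouring may_swap and the free swap count, instead of being buried
 * inside the ratio helper. Returns the noswap flag. */
static bool pick_scan_ratio(bool may_swap, long nr_swap_pages,
			    int swappiness, int percent[2])
{
	if (!may_swap || nr_swap_pages <= 0) {
		percent[0] = 0;   /* do not bother scanning anon pages */
		percent[1] = 100;
		return true;
	}
	scan_ratio_model(swappiness, percent);
	return false;
}

int main(void)
{
	int percent[2];
	bool noswap = pick_scan_ratio(true, 4096, 60, percent);

	printf("anon=%d%% file=%d%% noswap=%d\n",
	       percent[0], percent[1], noswap);
	return 0;
}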
----
- mm/vmscan.c | 7 ++++++-
- 1 file changed, 6 insertions(+), 1 deletion(-)
-
-Index: linux-2.6.34-ck1/mm/vmscan.c
-===================================================================
---- linux-2.6.34-ck1.orig/mm/vmscan.c 2010-05-18 12:24:33.974319780 +1000
-+++ linux-2.6.34-ck1/mm/vmscan.c 2010-05-18 12:26:16.233444880 +1000
-@@ -1633,6 +1633,7 @@ static void shrink_zone(int priority, st
+--- linux-2.6.28/mm/vmscan.c 2008-12-25 00:26:37.000000000 +0100
++++ linux-2.6.28.new/mm/vmscan.c 2011-06-02 13:51:01.615325087 +0200
+@@ -1342,13 +1342,6 @@ static void get_scan_ratio(struct zone *
+ zone_page_state(zone, NR_INACTIVE_FILE);
+ free = zone_page_state(zone, NR_FREE_PAGES);
+
+- /* If we have no swap space, do not bother scanning anon pages. */
+- if (nr_swap_pages <= 0) {
+- percent[0] = 0;
+- percent[1] = 100;
+- return;
+- }
+-
+ /* If we have very few page cache pages, force-scan anon pages. */
+ if (unlikely(file + free <= zone->pages_high)) {
+ percent[0] = 100;
+@@ -1416,8 +1409,16 @@ static unsigned long shrink_zone(int pri
unsigned long nr_reclaimed = 0;
unsigned long percent[2]; /* anon @ 0; file @ 1 */
enum lru_list l;
+ int tmp_priority;
++ int noswap = 0;
- get_scan_ratio(zone, sc, percent);
+- get_scan_ratio(zone, sc, percent);
++ /* If we have no swap space, do not bother scanning anon pages. */
++ if (!sc->may_swap || (nr_swap_pages <= 0)) {
++ noswap = 1;
++ percent[0] = 0;
++ percent[1] = 100;
++ } else
++ get_scan_ratio(zone, sc, percent);
-@@ -1648,7 +1649,11 @@ static void shrink_zone(int priority, st
+ for_each_evictable_lru(l) {
+ if (scan_global_lru(sc)) {
+@@ -1425,8 +1426,12 @@ static unsigned long shrink_zone(int pri
+ int scan;
scan = zone_page_state(zone, NR_LRU_BASE + l);
- if (priority) {
+- if (priority) {
- scan >>= priority;
++ if (priority || noswap) {
+ tmp_priority = priority;
+
+ if (file && priority > 0)
scan = (scan * percent[file]) / 100;
}
zone->lru[l].nr_scan += scan;
-
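
The arithmetic behind the tmp_priority rework above: shifting an LRU size by
the reclaim priority before applying the percent split rounds the smaller
list's share down to zero long before memory is actually tight, which is one
way the requested ratio gets ignored. A standalone demonstration with made-up
numbers (the list size and the 30% share are illustrative, not kernel values):

#include <stdio.h>

int main(void)
{
	unsigned long lru_size = 10000; /* pages on one LRU list */
	int percent = 30;               /* share requested via swappiness */

	/* DEF_PRIORITY is 12; reclaim starts there and works down. */
	for (int priority = 12; priority >= 0; priority -= 4) {
		unsigned long scan = lru_size >> priority;

		scan = (scan * percent) / 100;
		printf("priority %2d -> scan %lu pages\n", priority, scan);
	}
	/* At priority 12 the 30% share becomes 0 pages; capping the
	 * shift via tmp_priority is what keeps the split meaningful. */
	return 0;
}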