While it may be nice to have a copy of pages on swap once they have been
written there, the more garbage we leave in swap space, the slower any
further writes to and reads from it become. Just free the swapcache
whenever we can.

-ck

---
 include/linux/swap.h |    2 +-
 mm/memory.c          |    2 +-
 mm/swapfile.c        |    9 ++++-----
 mm/vmscan.c          |    2 +-
 4 files changed, 7 insertions(+), 8 deletions(-)

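Note (not part of the patch): the hunks below all revolve around the
vm_swap_full() heuristic, which mainline uses so that swapcache copies are
only released aggressively once swap is more than half full. As a rough,
stand-alone illustration of that heuristic, the C sketch below re-implements
the macro in user space; nr_swap_pages and total_swap_pages here are made-up
stand-ins for the kernel's counters, not real user-space symbols.

/*
 * Illustration only -- a user-space sketch of the vm_swap_full() test
 * that this patch stops relying on.
 */
#include <stdio.h>

static long nr_swap_pages;	/* free swap slots (stand-in value) */
static long total_swap_pages;	/* total swap slots (stand-in value) */

/* Same expression as the macro in include/linux/swap.h. */
static int vm_swap_full(void)
{
	return nr_swap_pages * 2 < total_swap_pages;
}

int main(void)
{
	total_swap_pages = 1000;
	for (nr_swap_pages = 900; nr_swap_pages >= 100; nr_swap_pages -= 200)
		printf("free %ld of %ld -> vm_swap_full() = %d\n",
		       nr_swap_pages, total_swap_pages, vm_swap_full());
	return 0;
}

With the patch applied, the swapcache is freed whenever it is safe to do so,
rather than only once that test starts returning true (or the page has no
other users).
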
--- linux-2.6.28/include/linux/swap.h 2011-06-02 13:58:40.469430998 +0200
+++ linux-2.6.28.new/include/linux/swap.h 2011-06-02 14:01:57.427730997 +0200
@@ -186,7 +186,7 @@ struct swap_list_t {
+ int next; /* swapfile to be used next */
+ };
 
-/* Swap 50% full? Release swapcache more aggressively.. */
+/* Swap 50% full? */
 #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
 
 /* linux/mm/page_alloc.c */
--- linux-2.6.28/mm/memory.c 2011-06-02 13:58:36.577820199 +0200
+++ linux-2.6.28.new/mm/memory.c 2011-06-02 14:01:57.423731396 +0200
@@ -2356,7 +2356,7 @@ static int do_swap_page(struct mm_struct
page_add_anon_rmap(page, vma, address);
swap_free(entry);
remove_exclusive_swap_page(page);
unlock_page(page);
--- linux-2.6.28/mm/swapfile.c 2011-06-02 13:58:36.581819801 +0200
+++ linux-2.6.28.new/mm/swapfile.c 2011-06-02 14:03:09.848109234 +0200
@@ -449,14 +449,9 @@ void free_swap_and_cache(swp_entry_t ent
 spin_unlock(&swap_lock);
 }
 if (page) {
- int one_user;
-
- BUG_ON(PagePrivate(page));
- one_user = (page_count(page) == 2);
- /* Only cache user (+us), or swap space full? Free it! */
+ /* Not mapped elsewhere, or swap space full? Free it! */
/* Also recheck PageSwapCache after page is locked (above) */
- if (PageSwapCache(page) && !PageWriteback(page) &&
- (one_user || vm_swap_full())) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
--- linux-2.6.28/mm/vmscan.c 2011-06-02 13:58:40.341443799 +0200
+++ linux-2.6.28.new/mm/vmscan.c 2011-06-02 14:05:27.848682722 +0200
@@ -758,7 +758,7 @@ cull_mlocked:
activate_locked:
/* Not a candidate for swapping, so reclaim swap space. */
remove_exclusive_swap_page_ref(page);
VM_BUG_ON(PageActive(page));
SetPageActive(page);
@@ -1296,9 +1296,7 @@ static void shrink_active_list(unsigned
 __count_zone_vm_events(PGREFILL, zone, pgscanned);
 __count_vm_events(PGDEACTIVATE, pgdeactivate);
 spin_unlock_irq(&zone->lru_lock);
- if (vm_swap_full())
- pagevec_swap_free(&pvec);
-
+ pagevec_swap_free(&pvec);
 pagevec_release(&pvec);
 }