--- /dev/null
+The vm currently performs scanning when allocating ram once the watermarks
+are below the pages_low value and tries to restore them to the pages_high
+watermark. The disadvantage of this is that we are scanning most aggressively
+at the same time we are allocating ram regardless of the stress the vm is
+under. Add a pages_lots watermark and allow the watermark to be relaxed
+according to the stress the vm is under at the time (as indicated by the
+priority value). Thus we have more in reserve next time we allocate ram and end
+up scanning less aggressively. Note the actual pages_lots isn't used directly
+in this code.
+
+Signed-off-by: Con Kolivas <kernel@kolivas.org>
+
+ include/linux/mmzone.h | 2 +-
+ mm/page_alloc.c | 3 +++
+ mm/vmscan.c | 17 ++++++++++++++---
+ 3 files changed, 18 insertions(+), 4 deletions(-)
+
+Index: linux-2.6.22-ck1/include/linux/mmzone.h
+===================================================================
+--- linux-2.6.22-ck1.orig/include/linux/mmzone.h 2007-07-09 18:44:34.000000000 +1000
++++ linux-2.6.22-ck1/include/linux/mmzone.h 2007-07-09 18:44:39.000000000 +1000
+@@ -181,7 +181,7 @@ enum zone_type {
+
+ struct zone {
+ /* Fields commonly accessed by the page allocator */
+- unsigned long pages_min, pages_low, pages_high;
++ unsigned long pages_min, pages_low, pages_high, pages_lots;
+ /*
+ * We don't know if the memory that we're going to allocate will be freeable
+ * or/and it will be released eventually, so to avoid totally wasting several
+Index: linux-2.6.22-ck1/mm/page_alloc.c
+===================================================================
+--- linux-2.6.22-ck1.orig/mm/page_alloc.c 2007-07-09 18:44:34.000000000 +1000
++++ linux-2.6.22-ck1/mm/page_alloc.c 2007-07-09 18:44:39.000000000 +1000
+@@ -1570,6 +1570,7 @@ void show_free_areas(void)
+ " min:%lukB"
+ " low:%lukB"
+ " high:%lukB"
++ " lots:%lukB"
+ " active_anon:%lukB"
+ " inactive_anon:%lukB"
+ " active_file:%lukB"
+@@ -1581,6 +1582,7 @@ void show_free_areas(void)
+ K(zone->pages_min),
+ K(zone->pages_low),
+ K(zone->pages_high),
++ K(zone->pages_lots),
+ K(zone_page_state(zone, NR_ACTIVE_ANON)),
+ K(zone_page_state(zone, NR_INACTIVE_ANON)),
+ K(zone_page_state(zone, NR_ACTIVE_FILE)),
+@@ -3142,6 +3144,7 @@ void setup_per_zone_pages_min(void)
+
+ zone->pages_low = zone->pages_min + (tmp >> 2);
+ zone->pages_high = zone->pages_min + (tmp >> 1);
++ zone->pages_lots = zone->pages_min + tmp;
+ setup_zone_migrate_reserve(zone);
+ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+Index: linux-2.6.22-ck1/mm/vmscan.c
+===================================================================
+--- linux-2.6.22-ck1.orig/mm/vmscan.c 2007-07-09 18:44:39.000000000 +1000
++++ linux-2.6.22-ck1/mm/vmscan.c 2007-07-09 18:44:39.000000000 +1000
+@@ -1171,6 +1171,7 @@ loop_again:
+ */
+ for (i = pgdat->nr_zones - 1; i >= 0; i--) {
+ struct zone *zone = pgdat->node_zones + i;
++ unsigned long watermark;
+
+ if (!populated_zone(zone))
+ continue;
+@@ -1178,8 +1179,14 @@ loop_again:
+ shrink_active_list(SWAP_CLUSTER_MAX, zone,
+ &sc, priority, 0);
+
+- if (!zone_watermark_ok(zone, order, zone->pages_high,
+- 0, 0)) {
++ /*
++ * The watermark is relaxed depending on the
++ * level of "priority" till it drops to
++ * pages_high.
++ */
++ watermark = zone->pages_high + (zone->pages_high *
++ priority / DEF_PRIORITY);
++ if (!zone_watermark_ok(zone, order, watermark, 0, 0)) {
+ end_zone = i;
+ break;
+ }
+@@ -1206,6 +1213,7 @@ loop_again:
+ for (i = 0; i <= end_zone; i++) {
+ struct zone *zone = pgdat->node_zones + i;
+ int nr_slab;
++ unsigned long watermark;
+
+ if (!populated_zone(zone))
+ continue;
+@@ -1213,7 +1221,10 @@ loop_again:
+ priority != DEF_PRIORITY)
+ continue;
+
+- if (!zone_watermark_ok(zone, order, zone->pages_high,
++ watermark = zone->pages_high + (zone->pages_high *
++ priority / DEF_PRIORITY);
++
++ if (!zone_watermark_ok(zone, order, watermark,
+ end_zone, 0))
+ all_zones_ok = 0;
+ temp_priority[i] = priority;
+