From: Nick Piggin <piggin@cyberone.com.au>

Use a "refill_counter" for inactive list scanning, similar to the one used
for active list scanning.  This batches up scanning now that we precisely
balance ratios, and don't round up the amount to be done.

No benefits observed yet, but I imagine it would lower the acquisition
frequency of the lru locks in some cases, and make the codepaths more
efficient in general due to better cache behaviour.
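
Below is a minimal, self-contained userspace sketch of the accumulate-and-flush
batching pattern the patch applies to zone->nr_scan_active and
zone->nr_scan_inactive.  The names (scan_batcher, batch_add, do_scan,
BATCH_MAX) are hypothetical and only illustrate the idea: tiny per-call scan
requests are accumulated in a counter, and real scanning is only done once
enough work has built up, with a cap so one flush never does too much at once.

#include <stdio.h>

#define BATCH_MAX	32	/* stands in for SWAP_CLUSTER_MAX */

struct scan_batcher {
	long pending;		/* accumulated scan requests */
};

/* Pretend to scan 'count' pages; returns the number "reclaimed". */
static long do_scan(long count)
{
	printf("scanning %ld pages in one batch\n", count);
	return count / 2;
}

/*
 * Add a (possibly tiny) scan request.  Nothing is scanned until the
 * pending total exceeds BATCH_MAX; then the backlog is flushed in one
 * go, capped at 4*BATCH_MAX so a single call never does unbounded work
 * (the excess is dropped, as the patch does for the active counter).
 */
static long batch_add(struct scan_batcher *sb, long requested)
{
	long count;

	sb->pending += requested;
	if (sb->pending <= BATCH_MAX)
		return 0;		/* keep accumulating */

	count = sb->pending;
	if (count > BATCH_MAX * 4)
		count = BATCH_MAX * 4;
	sb->pending = 0;
	return do_scan(count);
}

int main(void)
{
	struct scan_batcher sb = { 0 };
	int i;

	/* Many small requests become a few larger, batched scans. */
	for (i = 0; i < 20; i++)
		batch_add(&sb, 7);
	return 0;
}

With 20 requests of 7 pages each, the sketch ends up doing four batched scans
of 35 pages rather than twenty scans of 7, which is the lock-frequency and
cache win the changelog is after.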


---

 include/linux/mmzone.h |    3 ++-
 mm/page_alloc.c        |    3 ++-
 mm/vmscan.c            |   20 +++++++++++++-------
 3 files changed, 17 insertions(+), 9 deletions(-)

diff -puN include/linux/mmzone.h~vm-batch-inactive-scanning include/linux/mmzone.h
--- 25/include/linux/mmzone.h~vm-batch-inactive-scanning	2004-03-02 01:48:39.000000000 -0800
+++ 25-akpm/include/linux/mmzone.h	2004-03-02 01:48:39.000000000 -0800
@@ -76,7 +76,8 @@ struct zone {
 	spinlock_t		lru_lock;	
 	struct list_head	active_list;
 	struct list_head	inactive_list;
-	atomic_t		refill_counter;
+	atomic_t		nr_scan_active;
+	atomic_t		nr_scan_inactive;
 	unsigned long		nr_active;
 	unsigned long		nr_inactive;
 	int			all_unreclaimable; /* All pages pinned */
diff -puN mm/page_alloc.c~vm-batch-inactive-scanning mm/page_alloc.c
--- 25/mm/page_alloc.c~vm-batch-inactive-scanning	2004-03-02 01:48:39.000000000 -0800
+++ 25-akpm/mm/page_alloc.c	2004-03-02 01:48:39.000000000 -0800
@@ -1445,7 +1445,8 @@ static void __init free_area_init_core(s
 				zone_names[j], realsize, batch);
 		INIT_LIST_HEAD(&zone->active_list);
 		INIT_LIST_HEAD(&zone->inactive_list);
-		atomic_set(&zone->refill_counter, 0);
+		atomic_set(&zone->nr_scan_active, 0);
+		atomic_set(&zone->nr_scan_inactive, 0);
 		zone->nr_active = 0;
 		zone->nr_inactive = 0;
 		if (!size)
diff -puN mm/vmscan.c~vm-batch-inactive-scanning mm/vmscan.c
--- 25/mm/vmscan.c~vm-batch-inactive-scanning	2004-03-02 01:48:39.000000000 -0800
+++ 25-akpm/mm/vmscan.c	2004-03-02 01:48:39.000000000 -0800
@@ -744,6 +744,7 @@ shrink_zone(struct zone *zone, int max_s
 		int *total_scanned, struct page_state *ps)
 {
 	unsigned long ratio;
+	int count;
 
 	/*
 	 * Try to keep the active list 2/3 of the size of the cache.  And
@@ -757,23 +758,28 @@ shrink_zone(struct zone *zone, int max_s
 	 */
 	ratio = (unsigned long)SWAP_CLUSTER_MAX * zone->nr_active /
 				((zone->nr_inactive | 1) * 2);
-	atomic_add(ratio+1, &zone->refill_counter);
-	if (atomic_read(&zone->refill_counter) > SWAP_CLUSTER_MAX) {
-		int count;
-
+	atomic_add(ratio+1, &zone->nr_scan_active);
+	if (atomic_read(&zone->nr_scan_active) > SWAP_CLUSTER_MAX) {
 		/*
 		 * Don't try to bring down too many pages in one attempt.
 		 * If this fails, the caller will increase `priority' and
 		 * we'll try again, with an increased chance of reclaiming
 		 * mapped memory.
 		 */
-		count = atomic_read(&zone->refill_counter);
+		count = atomic_read(&zone->nr_scan_active);
 		if (count > SWAP_CLUSTER_MAX * 4)
 			count = SWAP_CLUSTER_MAX * 4;
-		atomic_set(&zone->refill_counter, 0);
+		atomic_set(&zone->nr_scan_active, 0);
 		refill_inactive_zone(zone, count, ps);
 	}
-	return shrink_cache(zone, gfp_mask, max_scan, total_scanned);
+
+	atomic_add(max_scan, &zone->nr_scan_inactive);
+	count = atomic_read(&zone->nr_scan_inactive);
+	if (max_scan > SWAP_CLUSTER_MAX) {
+		atomic_sub(count, &zone->nr_scan_inactive);
+		return shrink_cache(zone, gfp_mask, count, total_scanned);
+	}
+	return 0;
 }
 
 /*

_