Rename rmap_lock()/rmap_unlock() to page_map_lock()/page_map_unlock(), to sync this up with Andrea's patches.


---

 25-akpm/include/linux/rmap.h |    4 ++--
 25-akpm/mm/rmap.c            |    8 ++++----
 25-akpm/mm/vmscan.c          |   20 ++++++++++----------
 3 files changed, 16 insertions(+), 16 deletions(-)

diff -puN include/linux/rmap.h~rename-rmap_lock include/linux/rmap.h
--- 25/include/linux/rmap.h~rename-rmap_lock	2004-05-12 01:54:40.497740960 -0700
+++ 25-akpm/include/linux/rmap.h	2004-05-12 02:09:37.081439520 -0700
@@ -8,9 +8,9 @@
 #include <linux/config.h>
 #include <linux/linkage.h>
 
-#define rmap_lock(page) \
+#define page_map_lock(page) \
 	bit_spin_lock(PG_maplock, (unsigned long *)&(page)->flags)
-#define rmap_unlock(page) \
+#define page_map_unlock(page) \
 	bit_spin_unlock(PG_maplock, (unsigned long *)&(page)->flags)
 
 #ifdef CONFIG_MMU
diff -puN mm/vmscan.c~rename-rmap_lock mm/vmscan.c
--- 25/mm/vmscan.c~rename-rmap_lock	2004-05-12 01:54:40.514738376 -0700
+++ 25-akpm/mm/vmscan.c	2004-05-12 01:55:50.454105976 -0700
@@ -276,11 +276,11 @@ shrink_list(struct list_head *page_list,
 		if (PageWriteback(page))
 			goto keep_locked;
 
-		rmap_lock(page);
+		page_map_lock(page);
 		referenced = page_referenced(page);
 		if (referenced && page_mapping_inuse(page)) {
 			/* In active use or really unfreeable.  Activate it. */
-			rmap_unlock(page);
+			page_map_unlock(page);
 			goto activate_locked;
 		}
 
@@ -295,10 +295,10 @@ shrink_list(struct list_head *page_list,
 		 * XXX: implement swap clustering ?
 		 */
 		if (PageAnon(page) && !PageSwapCache(page)) {
-			rmap_unlock(page);
+			page_map_unlock(page);
 			if (!add_to_swap(page))
 				goto activate_locked;
-			rmap_lock(page);
+			page_map_lock(page);
 		}
 		if (PageSwapCache(page)) {
 			mapping = &swapper_space;
@@ -313,16 +313,16 @@ shrink_list(struct list_head *page_list,
 		if (page_mapped(page) && mapping) {
 			switch (try_to_unmap(page)) {
 			case SWAP_FAIL:
-				rmap_unlock(page);
+				page_map_unlock(page);
 				goto activate_locked;
 			case SWAP_AGAIN:
-				rmap_unlock(page);
+				page_map_unlock(page);
 				goto keep_locked;
 			case SWAP_SUCCESS:
 				; /* try to free the page below */
 			}
 		}
-		rmap_unlock(page);
+		page_map_unlock(page);
 
 		/*
 		 * If the page is dirty, only perform writeback if that write
@@ -656,13 +656,13 @@ refill_inactive_zone(struct zone *zone, 
 				list_add(&page->lru, &l_active);
 				continue;
 			}
-			rmap_lock(page);
+			page_map_lock(page);
 			if (page_referenced(page)) {
-				rmap_unlock(page);
+				page_map_unlock(page);
 				list_add(&page->lru, &l_active);
 				continue;
 			}
-			rmap_unlock(page);
+			page_map_unlock(page);
 		}
 		/*
 		 * FIXME: need to consider page_count(page) here if/when we
diff -puN mm/rmap.c~rename-rmap_lock mm/rmap.c
--- 25/mm/rmap.c~rename-rmap_lock	2004-05-12 01:54:40.529736096 -0700
+++ 25-akpm/mm/rmap.c	2004-05-12 02:09:37.086438760 -0700
@@ -186,7 +186,7 @@ page_add_rmap(struct page *page, pte_t *
 	if (PageReserved(page))
 		return pte_chain;
 
-	rmap_lock(page);
+	page_map_lock(page);
 
 	if (page->pte.direct == 0) {
 		page->pte.direct = pte_paddr;
@@ -223,7 +223,7 @@ page_add_rmap(struct page *page, pte_t *
 	cur_pte_chain->ptes[pte_chain_idx(cur_pte_chain) - 1] = pte_paddr;
 	cur_pte_chain->next_and_idx--;
 out:
-	rmap_unlock(page);
+	page_map_unlock(page);
 	return pte_chain;
 }
 
@@ -245,7 +245,7 @@ void fastcall page_remove_rmap(struct pa
 	if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))
 		return;
 
-	rmap_lock(page);
+	page_map_lock(page);
 
 	if (!page_mapped(page))
 		goto out_unlock;	/* remap_page_range() from a driver? */
@@ -294,7 +294,7 @@ out:
 		dec_page_state(nr_mapped);
 	}
 out_unlock:
-	rmap_unlock(page);
+	page_map_unlock(page);
 }
 
 /**

_