From: Hugh Dickins <hugh@veritas.com>

Convert parisc's __flush_dcache_page() from walking the address_space's
i_mmap and i_mmap_shared lists with list_for_each() to the prio_tree
iterator vma_prio_tree_next().  The prio_tree lookup returns only those
vmas which actually map the page, so the open-coded vm_pgoff range
checks disappear; the user address to flush is computed from the page's
pgoff and the vma's vm_start and vm_pgoff.  page->index is converted to
a PAGE_SIZE-based pgoff up front, and linux/pagemap.h is included for
PAGE_CACHE_SHIFT.

---
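
For reference, a minimal userspace sketch of the address arithmetic the
converted loops perform.  struct vma, vma_address() and the PAGE_SHIFT
value below are simplified stand-ins for illustration, not the kernel's
own definitions:

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed 4k pages, as on parisc */

/* the handful of vm_area_struct fields the flush loop uses */
struct vma {
	unsigned long vm_start;	/* first user address of the mapping */
	unsigned long vm_end;	/* one past the last mapped address */
	unsigned long vm_pgoff;	/* file offset of vm_start, in pages */
};

/*
 * User virtual address of file page pgoff within vma.  The prio_tree
 * walk only returns vmas with vm_pgoff <= pgoff and
 * pgoff < vm_pgoff + ((vm_end - vm_start) >> PAGE_SHIFT), which is
 * exactly the range check the old list walk open-coded before
 * computing the address.
 */
static unsigned long vma_address(const struct vma *vma, unsigned long pgoff)
{
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

int main(void)
{
	/* a vma mapping file pages 16..31 at 0x60000000 */
	struct vma vma = { 0x60000000UL, 0x60010000UL, 16 };

	/* file page 20 is four pages in: prints 0x60004000 */
	printf("flush at %#lx\n", vma_address(&vma, 20));
	return 0;
}

The diff below does the same computation with the kernel's real types.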

 25-akpm/arch/parisc/kernel/cache.c |   43 +++++++++++++------------------------
 1 file changed, 16 insertions(+), 27 deletions(-)

diff -puN arch/parisc/kernel/cache.c~rmap-19-arch-prio_tree-parisc arch/parisc/kernel/cache.c
--- 25/arch/parisc/kernel/cache.c~rmap-19-arch-prio_tree-parisc	Wed May 12 14:40:31 2004
+++ 25-akpm/arch/parisc/kernel/cache.c	Wed May 12 14:40:31 2004
@@ -17,6 +17,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/seq_file.h>
+#include <linux/pagemap.h>
 
 #include <asm/pdc.h>
 #include <asm/cache.h>
@@ -230,30 +231,27 @@ void disable_sr_hashing(void)
 void __flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
-	struct list_head *l;
+	struct vm_area_struct *mpnt = NULL;
+	struct prio_tree_iter iter;
+	unsigned long offset;
+	unsigned long addr;
+	pgoff_t pgoff;
 
 	flush_kernel_dcache_page(page_address(page));
 
 	if (!mapping)
 		return;
 
+	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
 	/* We have ensured in arch_get_unmapped_area() that all shared
 	 * mappings are mapped at equivalent addresses, so we only need
 	 * to flush one for them all to become coherent */
-	list_for_each(l, &mapping->i_mmap_shared) {
-		struct vm_area_struct *mpnt;
-		unsigned long off, addr;
-
-		mpnt = list_entry(l, struct vm_area_struct, shared);
-
-		if (page->index < mpnt->vm_pgoff)
-			continue;
-
-		off = page->index - mpnt->vm_pgoff;
-		if (off >= (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT)
-			continue;
 
-		addr = mpnt->vm_start + (off << PAGE_SHIFT);
+	while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap_shared,
+					&iter, pgoff, pgoff)) != NULL) {
+		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
+		addr = mpnt->vm_start + offset;
 
 		/* flush instructions produce non access tlb misses.
 		 * On PA, we nullify these instructions rather than 
@@ -276,20 +274,11 @@ void __flush_dcache_page(struct page *pa
 	 * *any* mappings of a file are always congruently mapped (whether
 	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
 	 * to flush one address here too */
-	list_for_each(l, &mapping->i_mmap) {
-		struct vm_area_struct *mpnt;
-		unsigned long off, addr;
-
-		mpnt = list_entry(l, struct vm_area_struct, shared);
-
-		if (page->index < mpnt->vm_pgoff)
-			continue;
-
-		off = page->index - mpnt->vm_pgoff;
-		if (off >= (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT)
-			continue;
 
-		addr = mpnt->vm_start + (off << PAGE_SHIFT);
+	while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
+					&iter, pgoff, pgoff)) != NULL) {
+		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
+		addr = mpnt->vm_start + offset;
 
 		/* This is just for speed.  If the page translation isn't
 		 * there there's no point exciting the nadtlb handler into

_