From: Andi Kleen <ak@suse.de>

sh64		converted to the 4level page table interface, not compile tested.
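
The lookup sites follow the same pattern as the rest of the 4level
series; a sketch of the conversion (the pml4_offset()/pml4_pgd_offset()
helpers are assumed to come from the new asm-generic nopml4 headers,
which fold the extra level away on a three-level architecture such as
sh64):

	/* before */
	pgd = pgd_offset(mm, addr);

	/* after: walk through the (folded) pml4 level explicitly */
	pgd = pml4_pgd_offset(pml4_offset(mm, addr), addr);

The old get_pgd_fast()/pgd_alloc() pair in pgalloc.h is replaced by
__pgd_alloc() in arch/sh64/mm/init.c, which the generic
nopml4-pgalloc.h include is expected to wrap.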

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/arch/sh64/mm/cache.c           |    4 ++--
 25-akpm/arch/sh64/mm/fault.c           |    2 +-
 25-akpm/arch/sh64/mm/hugetlbpage.c     |    4 ++--
 25-akpm/arch/sh64/mm/init.c            |   18 +++++++++++++++++-
 25-akpm/arch/sh64/mm/ioremap.c         |    6 +++---
 25-akpm/arch/sh64/mm/tlbmiss.c         |    4 ++--
 25-akpm/include/asm-sh64/mmu_context.h |    4 ++--
 25-akpm/include/asm-sh64/page.h        |    2 ++
 25-akpm/include/asm-sh64/pgalloc.h     |   20 ++------------------
 25-akpm/include/asm-sh64/pgtable.h     |    8 +++-----
 10 files changed, 36 insertions(+), 36 deletions(-)

diff -puN arch/sh64/mm/cache.c~4level-architecture-changes-for-sh64 arch/sh64/mm/cache.c
--- 25/arch/sh64/mm/cache.c~4level-architecture-changes-for-sh64	2004-11-03 21:53:26.376058304 -0800
+++ 25-akpm/arch/sh64/mm/cache.c	2004-11-03 21:53:26.393055720 -0800
@@ -581,7 +581,7 @@ static void sh64_dcache_purge_virt_page(
 	pte_t *pte;
 	pte_t entry;
 
-	pgd = pgd_offset(mm, eaddr);
+	pgd = pml4_pgd_offset(pml4_offset(mm, eaddr), eaddr);
 	pmd = pmd_offset(pgd, eaddr);
 
 	if (pmd_none(*pmd) || pmd_bad(*pmd))
@@ -610,7 +610,7 @@ static void sh64_dcache_purge_user_page(
 	   following page table traversal is safe even on SMP/pre-emptible. */
 
 	if (!mm) return; /* No way to find physical address of page */
-	pgd = pgd_offset(mm, eaddr);
+	pgd = pml4_pgd_offset(pml4_offset(mm, eaddr), eaddr);
 	if (pgd_bad(*pgd)) return;
 
 	pmd = pmd_offset(pgd, eaddr);
diff -puN arch/sh64/mm/fault.c~4level-architecture-changes-for-sh64 arch/sh64/mm/fault.c
--- 25/arch/sh64/mm/fault.c~4level-architecture-changes-for-sh64	2004-11-03 21:53:26.377058152 -0800
+++ 25-akpm/arch/sh64/mm/fault.c	2004-11-03 21:53:26.394055568 -0800
@@ -92,7 +92,7 @@ static pte_t *lookup_pte(struct mm_struc
 	pte_t *pte;
 	pte_t entry;
 
-	dir = pgd_offset(mm, address);
+	dir = pml4_pgd_offset(pml4_offset(mm, address), address);
 	if (pgd_none(*dir)) {
 		return NULL;
 	}
diff -puN arch/sh64/mm/hugetlbpage.c~4level-architecture-changes-for-sh64 arch/sh64/mm/hugetlbpage.c
--- 25/arch/sh64/mm/hugetlbpage.c~4level-architecture-changes-for-sh64	2004-11-03 21:53:26.379057848 -0800
+++ 25-akpm/arch/sh64/mm/hugetlbpage.c	2004-11-03 21:53:26.395055416 -0800
@@ -30,7 +30,7 @@ static pte_t *huge_pte_alloc(struct mm_s
 	pmd_t *pmd;
 	pte_t *pte = NULL;
 
-	pgd = pgd_offset(mm, addr);
+	pgd = pml4_pgd_offset(pml4_offset(mm, addr), addr);
 	if (pgd) {
 		pmd = pmd_alloc(mm, pgd, addr);
 		if (pmd)
@@ -45,7 +45,7 @@ static pte_t *huge_pte_offset(struct mm_
 	pmd_t *pmd;
 	pte_t *pte = NULL;
 
-	pgd = pgd_offset(mm, addr);
+	pgd = pml4_pgd_offset(pml4_offset(mm, addr), addr);
 	if (pgd) {
 		pmd = pmd_offset(pgd, addr);
 		if (pmd)
diff -puN arch/sh64/mm/init.c~4level-architecture-changes-for-sh64 arch/sh64/mm/init.c
--- 25/arch/sh64/mm/init.c~4level-architecture-changes-for-sh64	2004-11-03 21:53:26.380057696 -0800
+++ 25-akpm/arch/sh64/mm/init.c	2004-11-03 21:53:26.395055416 -0800
@@ -32,7 +32,7 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_ga
  * Cache of MMU context last used.
  */
 unsigned long mmu_context_cache;
-pgd_t * mmu_pdtp_cache;
+pml4_t * mmu_pdtp_cache;
 int after_bootmem = 0;
 
 /*
@@ -197,3 +197,19 @@ void free_initrd_mem(unsigned long start
 }
 #endif
 
+pgd_t *__pgd_alloc(struct mm_struct *mm, pml4_t *pml4, unsigned long addr)
+{
+	unsigned long *ret;
+
+	if ((ret = pgd_quicklist) != NULL) {
+		pgd_quicklist = (unsigned long *)(*ret);
+		ret[0] = 0;
+		pgtable_cache_size--;
+	} else
+		ret = (unsigned long *)get_pgd_slow();
+
+	if (ret) {
+		memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+	}
+	return (pgd_t *)ret;
+}
diff -puN arch/sh64/mm/ioremap.c~4level-architecture-changes-for-sh64 arch/sh64/mm/ioremap.c
--- 25/arch/sh64/mm/ioremap.c~4level-architecture-changes-for-sh64	2004-11-03 21:53:26.382057392 -0800
+++ 25-akpm/arch/sh64/mm/ioremap.c	2004-11-03 21:53:26.396055264 -0800
@@ -97,7 +97,7 @@ static int remap_area_pages(unsigned lon
 	unsigned long end = address + size;
 
 	phys_addr -= address;
-	dir = pgd_offset_k(address);
+	dir = pml4_pgd_offset_k(pml4_offset_k(address), address);
 	flush_cache_all();
 	if (address >= end)
 		BUG();
@@ -355,7 +355,7 @@ static void shmedia_mapioaddr(unsigned l
 
 	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n",  pa, va);
 
-	pgdp = pgd_offset_k(va);
+	pgdp = pml4_pgd_offset_k(pml4_offset_k(va), va);
 	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
 		pmdp = (pmd_t *)sh64_get_page();
 		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
@@ -388,7 +388,7 @@ static void shmedia_unmapioaddr(unsigned
 	pmd_t *pmdp;
 	pte_t *ptep;
 
-	pgdp = pgd_offset_k(vaddr);
+	pgdp = pml4_pgd_offset_k(pml4_offset_k(vaddr), vaddr);
 	pmdp = pmd_offset(pgdp, vaddr);
 
 	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
diff -puN arch/sh64/mm/tlbmiss.c~4level-architecture-changes-for-sh64 arch/sh64/mm/tlbmiss.c
--- 25/arch/sh64/mm/tlbmiss.c~4level-architecture-changes-for-sh64	2004-11-03 21:53:26.384057088 -0800
+++ 25-akpm/arch/sh64/mm/tlbmiss.c	2004-11-03 21:53:26.396055264 -0800
@@ -99,7 +99,7 @@ static int handle_vmalloc_fault(struct m
 	static pte_t *pte;
 	pte_t entry;
 
-	dir = pgd_offset_k(address);
+	dir = pml4_pgd_offset_k(pml4_offset_k(address), address);
 	pmd = pmd_offset(dir, address);
 
 	if (pmd_none(*pmd)) {
@@ -150,7 +150,7 @@ static int handle_tlbmiss(struct mm_stru
 		/* upper half - never has page table entries. */
 		return 0;
 	}
-	dir = pgd_offset(mm, address);
+	dir = pml4_pgd_offset(pml4_offset(mm, address), address);
 	if (pgd_none(*dir)) {
 		return 0;
 	}
diff -puN include/asm-sh64/mmu_context.h~4level-architecture-changes-for-sh64 include/asm-sh64/mmu_context.h
--- 25/include/asm-sh64/mmu_context.h~4level-architecture-changes-for-sh64	2004-11-03 21:53:26.385056936 -0800
+++ 25-akpm/include/asm-sh64/mmu_context.h	2004-11-03 21:53:26.397055112 -0800
@@ -31,7 +31,7 @@ extern unsigned long mmu_context_cache;
 
 
 /* Current mm's pgd */
-extern pgd_t *mmu_pdtp_cache;
+extern pml4_t *mmu_pdtp_cache;
 
 #define SR_ASID_MASK		0xffffffffff00ffffULL
 #define SR_ASID_SHIFT		16
@@ -189,7 +189,7 @@ static __inline__ void switch_mm(struct 
 				 struct task_struct *tsk)
 {
 	if (prev != next) {
-		mmu_pdtp_cache = next->pgd;
+		mmu_pdtp_cache = next->pml4;
 		activate_context(next);
 	}
 }
diff -puN include/asm-sh64/page.h~4level-architecture-changes-for-sh64 include/asm-sh64/page.h
--- 25/include/asm-sh64/page.h~4level-architecture-changes-for-sh64	2004-11-03 21:53:26.386056784 -0800
+++ 25-akpm/include/asm-sh64/page.h	2004-11-03 21:53:26.397055112 -0800
@@ -130,6 +130,8 @@ extern __inline__ int get_order(unsigned
 	return order;
 }
 
+#include <asm-generic/nopml4-page.h>
+
 #endif
 
 #endif /* __KERNEL__ */
diff -puN include/asm-sh64/pgalloc.h~4level-architecture-changes-for-sh64 include/asm-sh64/pgalloc.h
--- 25/include/asm-sh64/pgalloc.h~4level-architecture-changes-for-sh64	2004-11-03 21:53:26.388056480 -0800
+++ 25-akpm/include/asm-sh64/pgalloc.h	2004-11-03 21:53:26.398054960 -0800
@@ -46,23 +46,6 @@ extern __inline__ pgd_t *get_pgd_slow(vo
 	return ret;
 }
 
-extern __inline__ pgd_t *get_pgd_fast(void)
-{
-	unsigned long *ret;
-
-	if ((ret = pgd_quicklist) != NULL) {
-		pgd_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		pgtable_cache_size--;
-	} else
-		ret = (unsigned long *)get_pgd_slow();
-
-	if (ret) {
-		memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-	}
-	return (pgd_t *)ret;
-}
-
 extern __inline__ void free_pgd_fast(pgd_t *pgd)
 {
 	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
@@ -170,7 +153,6 @@ static __inline__ void pmd_free(pmd_t *p
 
 #define check_pgt_cache()		do { } while (0)
 #define pgd_free(pgd)		free_pgd_slow(pgd)
-#define pgd_alloc(mm)		get_pgd_fast()
 
 extern int do_check_pgt_cache(int, int);
 
@@ -199,4 +181,6 @@ static inline void pmd_populate(struct m
 	set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) page_address (pte)));
 }
 
+#include <asm-generic/nopml4-pgalloc.h>
+
 #endif /* __ASM_SH64_PGALLOC_H */
diff -puN include/asm-sh64/pgtable.h~4level-architecture-changes-for-sh64 include/asm-sh64/pgtable.h
--- 25/include/asm-sh64/pgtable.h~4level-architecture-changes-for-sh64	2004-11-03 21:53:26.389056328 -0800
+++ 25-akpm/include/asm-sh64/pgtable.h	2004-11-03 21:53:26.398054960 -0800
@@ -146,11 +146,8 @@ static __inline__ void pmd_set(pmd_t *pm
 
 /* To find an entry in a generic PGD. */
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pgd_index_k(address) pgd_index(address)
 #define __pgd_offset(address) pgd_index(address)
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/* To find an entry in a kernel PGD. */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
 /*
  * PGD level access routines.
@@ -234,7 +231,7 @@ static inline pmd_t * pmd_offset(pgd_t *
 #define pte_unmap_nested(pte)	do { } while (0)
 
 /* Round it up ! */
-#define USER_PTRS_PER_PGD	((TASK_SIZE+PGDIR_SIZE-1)/PGDIR_SIZE)
+#define USER_PPGDS_IN_LAST_PML4	((TASK_SIZE+PGDIR_SIZE-1)/PGDIR_SIZE)
 #define FIRST_USER_PGD_NR	0
 
 #ifndef __ASSEMBLY__
@@ -495,5 +492,6 @@ extern void update_mmu_cache(struct vm_a
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 #include <asm-generic/pgtable.h>
+#include <asm-generic/nopml4-pgtable.h>
 
 #endif /* __ASM_SH64_PGTABLE_H */
_