#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H


/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#ifndef _I386_BITOPS_H
#include <asm/bitops.h>
#endif

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern unsigned long empty_zero_page[1024];
extern pgd_t swapper_pg_dir[1024];
extern struct kmem_cache *pmd_cache;
extern spinlock_t pgd_lock;
extern struct page *pgd_list;
void check_pgt_cache(void);

void pmd_ctor(void *, struct kmem_cache *, unsigned long);
void pgtable_cache_init(void);
void paging_init(void);


/*
 * The Linux x86 paging architecture is 'compile-time dual-mode': it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE	(1UL << PMD_SHIFT)
# define PMD_MASK	(~(PMD_SIZE-1))
#else
# include <asm/pgtable-2level-defs.h>
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT	22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + vmalloc_earlyreserve + \
			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
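
/*
 * Worked example of the VMALLOC_START rounding above (illustrative values
 * only, not taken from any particular machine): with VMALLOC_OFFSET of
 * 0x800000 (8MB), if high_memory + vmalloc_earlyreserve were 0xf6a12345,
 * then
 *
 *	(0xf6a12345 + 2*0x800000 - 1) & ~0x7fffff == 0xf7800000
 *
 * i.e. the result is aligned to an 8MB boundary and leaves a hole of at
 * least VMALLOC_OFFSET (here roughly 14MB) below the vmalloc area.
 */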

/*
 * _PAGE_PSE set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_BIT_PRESENT	0
#define _PAGE_BIT_RW		1
#define _PAGE_BIT_USER		2
#define _PAGE_BIT_PWT		3
#define _PAGE_BIT_PCD		4
#define _PAGE_BIT_ACCESSED	5
#define _PAGE_BIT_DIRTY		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_NX		63

#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004
#define _PAGE_PWT	0x008
#define _PAGE_PCD	0x010
#define _PAGE_ACCESSED	0x020
#define _PAGE_DIRTY	0x040
#define _PAGE_PSE	0x080	/* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */
#define _PAGE_UNUSED1	0x200	/* available for programmer */
#define _PAGE_UNUSED2	0x400
#define _PAGE_UNUSED3	0x800

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	0x040	/* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE	0x080	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */
#ifdef CONFIG_X86_PAE
#define _PAGE_NX	(1ULL<<_PAGE_BIT_NX)
#else
#define _PAGE_NX	0
#endif

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE \
	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED \
	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)

#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY \
	PAGE_COPY_NOEXEC
#define PAGE_READONLY \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#define _PAGE_KERNEL \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define _PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)

extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX		__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read. Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
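
/*
 * The __Pxxx/__Sxxx tables above are indexed by the xwr (PROT_EXEC,
 * PROT_WRITE, PROT_READ) bits of a private or shared mapping.  For
 * example (illustrative only), a MAP_PRIVATE mapping created with
 * PROT_READ|PROT_WRITE picks __P011 == PAGE_COPY, i.e. a read-only,
 * non-executable pte, so that the first write faults and triggers
 * copy-on-write.
 */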

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];

#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * To avoid harmful races, pmd_none(x) should check only the low word
 * of the pmd when PAE is enabled.
 */
#define pmd_none(x)	(!(unsigned long)pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)


#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
static inline int pte_read(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
static inline int pte_huge(pte_t pte)		{ return (pte).pte_low & _PAGE_PSE; }

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }

static inline pte_t pte_rdprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkread(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= _PAGE_PSE; return pte; }
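
/*
 * These helpers operate on a pte value, not on the page tables, so they
 * compose freely; the caller is responsible for writing the result back
 * with set_pte() or one of its variants.  A typical (illustrative only)
 * sequence when handling a write fault might look like:
 *
 *	entry = pte_mkyoung(pte_mkdirty(pte_mkwrite(entry)));
 *	set_pte(ptep, entry);
 */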

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif
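
/*
 * For illustration, ptep_test_and_clear_young() below follows these rules:
 * it flips the accessed bit directly in the pte with test_and_clear_bit()
 * (a P->P access change on a user pte) and then notifies any hypervisor
 * via pte_update(); ptep_clear_flush_young() supplies the TLB flush.
 */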

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
({									\
	int __changed = !pte_same(*(ptep), entry);			\
	if (__changed && dirty) {					\
		(ptep)->pte_low = (entry).pte_low;			\
		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
		flush_tlb_page(vma, address);				\
	}								\
	__changed;							\
})

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(vma, addr, ptep) ({			\
	int __ret = 0;							\
	if (pte_dirty(*(ptep)))						\
		__ret = test_and_clear_bit(_PAGE_BIT_DIRTY,		\
						&(ptep)->pte_low);	\
	if (__ret)							\
		pte_update((vma)->vm_mm, addr, ptep);			\
	__ret;								\
})

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
	int __ret = 0;							\
	if (pte_young(*(ptep)))						\
		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
						&(ptep)->pte_low);	\
	if (__ret)							\
		pte_update((vma)->vm_mm, addr, ptep);			\
	__ret;								\
})

/*
 * Rules for using ptep_establish: the pte MUST be a user pte, and
 * must be a present->present transition.
 */
#define __HAVE_ARCH_PTEP_ESTABLISH
#define ptep_establish(vma, address, ptep, pteval)			\
do {									\
	set_pte_present((vma)->vm_mm, address, ptep, pteval);		\
	flush_tlb_page(vma, address);					\
} while (0)

#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(vma, address, ptep)			\
({									\
	int __dirty;							\
	__dirty = ptep_test_and_clear_dirty((vma), (address), (ptep));	\
	if (__dirty)							\
		flush_tlb_page(vma, address);				\
	__dirty;							\
})

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep)			\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
	if (__young)							\
		flush_tlb_page(vma, address);				\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
	pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
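
/*
 * As an illustration (the actual call site lives in the pgd allocation
 * code, not here), a freshly allocated pgd typically gets the kernel
 * portion of swapper_pg_dir copied into it along these lines:
 *
 *	clone_pgd_range(pgd + USER_PTRS_PER_PGD,
 *			swapper_pg_dir + USER_PTRS_PER_PGD,
 *			KERNEL_PGD_PTRS);
 */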

/*
 * Macro to mark a page protection value as "uncacheable".  On processors
 * which do not support it, this is a no-op.
 */
#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)					  \
				 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_low |= pgprot_val(newprot);
#ifdef CONFIG_X86_PAE
	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	pte.pte_high |= (pgprot_val(newprot) >> 32) & \
					(__supported_pte_mask >> 32);
#endif
	return pte;
}

#define pmd_large(pmd) \
((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_index_k(addr) pgd_index(addr)
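
/*
 * Worked example (non-PAE, where PGDIR_SHIFT is 22 and PTRS_PER_PGD is
 * 1024): pgd_index(0xc0100000) == 0xc0100000 >> 22 == 0x300, so with the
 * default 3GB/1GB split the kernel mappings start at entry 768 of the pgd.
 */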

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))

#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_page_vaddr(pmd) \
		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
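
/*
 * Putting the pieces together, a minimal sketch (for illustration only;
 * kernel mappings need no kmap, and the pud/pmd levels are folded away on
 * !PAE) of how a kernel virtual address is resolved to its pte with these
 * helpers:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * lookup_address() below does essentially this, with an extra pmd_large()
 * check for 4MB mappings.
 */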

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address);

/*
 * Make a given kernel text page executable/non-executable.
 * Returns the previous executability setting of that page (which
 * is used to restore the previous state). Used by the SMP bootup code.
 * NOTE: this is an __init function for security reasons.
 */
#ifdef CONFIG_X86_PAE
 extern int set_kernel_exec(unsigned long vaddr, int enable);
#else
 static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0; }
#endif

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
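
/*
 * With CONFIG_HIGHPTE the pte page may live in highmem, so callers must
 * treat pte_offset_map()/pte_unmap() as a bracketing pair around a short,
 * atomic section.  A rough usage sketch (illustrative only):
 *
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	entry = *pte;
 *	pte_unmap(pte);
 *
 * The _nested variants exist so that two pte pages can be mapped at once,
 * e.g. when copying page tables.
 */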

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)					\
do {									\
	pte_clear(&init_mm, vaddr, ptep);				\
	__flush_tlb_one(vaddr);						\
} while (0)

/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)

void native_pagetable_setup_start(pgd_t *base);
void native_pagetable_setup_done(pgd_t *base);

#ifndef CONFIG_PARAVIRT
static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* !CONFIG_PARAVIRT */

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#endif /* CONFIG_FLATMEM */

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#include <asm-generic/pgtable.h>

#endif /* _I386_PGTABLE_H */