/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm/page.h>

#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#endif

#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * we simulate an x86-style page table for the linux mm code
 */

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/processor.h>
#include <asm/cache.h>

/* This is for the serialization of PxTLB broadcasts. At least on the N-class
 * systems, only one PxTLB inter-processor broadcast can be active at any one
 * time on the Merced bus. */
extern spinlock_t pa_tlb_flush_lock;
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
extern int pa_serialize_tlb_flushes;
#else
#define pa_serialize_tlb_flushes        (0)
#endif

#define purge_tlb_start(flags)  do { \
	if (pa_serialize_tlb_flushes)	\
		spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_save(flags);	\
	} while (0)
#define purge_tlb_end(flags)	do { \
	if (pa_serialize_tlb_flushes)	\
		spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_restore(flags); \
	} while (0)
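
/* Usage sketch: these are statement macros and take the flags lvalue
 * directly (no &), e.g.
 *
 *	unsigned long flags;
 *	purge_tlb_start(flags);
 *	mtsp(space, SR_TEMP1);
 *	pdtlb(SR_TEMP1, addr);
 *	purge_tlb_end(flags);
 *
 * Even without serialization interrupts are disabled, presumably so an
 * interrupt cannot clobber SR_TEMP1 between the mtsp and the purge.
 */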

/* Purge data and instruction TLB entries. The TLB purge instructions
 * are slow on SMP machines since the purge must be broadcast to all CPUs.
 */

static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	purge_tlb_start(flags);
	mtsp(mm->context.space_id, SR_TEMP1);
	pdtlb(SR_TEMP1, addr);
	pitlb(SR_TEMP1, addr);
	purge_tlb_end(flags);
}

extern void __update_cache(pte_t pte);

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)			\
	do {					\
		*(pteptr) = (pteval);		\
		mb();				\
	} while(0)
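
/* The mb() is presumably here so the PTE store is globally visible
 * before any subsequent TLB purge for that address (see
 * purge_tlb_entries() above) can be observed by another CPU.
 */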

#endif /* !__ASSEMBLY__ */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

/* This is the size of the initially mapped kernel memory */
#if defined(CONFIG_64BIT)
#define KERNEL_INITIAL_ORDER	26	/* 1<<26 = 64MB */
#else
#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
#endif
#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)

#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_TABLE_ORDER	1
#define PGD_TABLE_ORDER	0
#else
#define PGD_TABLE_ORDER	1
#endif

/* Definitions for 3rd level (we use PLD here for Page Lower Directory
 * because PTE_SHIFT is used further down to mean the shift that has to
 * be done to get the usable bits out of a PTE) */
#define PLD_SHIFT	PAGE_SHIFT
#define PLD_SIZE	PAGE_SIZE
#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE    (1UL << BITS_PER_PTE)
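
/* Worked example (illustrative): on 64-bit with 4k pages, assuming
 * 8-byte PTEs (PAGE_SHIFT == 12, BITS_PER_PTE_ENTRY == 3 from
 * asm/page.h), BITS_PER_PTE = 12 - 3 = 9, so PTRS_PER_PTE = 512 and
 * one PTE page maps 512 * 4k = 2MB.
 */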

/* Definitions for 2nd level */
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT       (PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define BITS_PER_PMD	(PAGE_SHIFT + PMD_TABLE_ORDER - BITS_PER_PMD_ENTRY)
#define PTRS_PER_PMD    (1UL << BITS_PER_PMD)
#else
#define BITS_PER_PMD	0
#endif

/* Definitions for 1st level */
#define PGDIR_SHIFT	(PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)
#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD	(BITS_PER_LONG - PGDIR_SHIFT)
#else
#define BITS_PER_PGD	(PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD    (1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD       PTRS_PER_PGD

#ifdef CONFIG_64BIT
#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS	(BITS_PER_LONG)
#define MAX_ADDRESS	(1ULL << MAX_ADDRBITS)
#define SPACEID_SHIFT	0
#endif
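
/* Worked example (illustrative, 64-bit, 4k pages, 8-byte entries as
 * above): BITS_PER_PMD = 12 + 1 - 3 = 10 and PGDIR_SHIFT = 12 + 9 +
 * 10 = 31; the #if above does not fire (31 + 12 + 0 - 3 = 40 <= 64),
 * so BITS_PER_PGD = 9, MAX_ADDRBITS = 31 + 9 = 40 (1TB per space)
 * and SPACEID_SHIFT = 8.
 */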

/* This calculates the number of initial pages we need for the initial
 * page tables */
#if (KERNEL_INITIAL_ORDER) >= (PLD_SHIFT + BITS_PER_PTE)
# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PLD_SHIFT - BITS_PER_PTE))
#else
# define PT_INITIAL	(1)  /* all initial PTEs fit into one page */
#endif
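
/* E.g. 64-bit with the 4k figures above: KERNEL_INITIAL_ORDER (26)
 * minus PLD_SHIFT + BITS_PER_PTE (12 + 9 = 21) gives PT_INITIAL =
 * 1 << 5 = 32 PTE pages to map the initial 64MB.
 */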

/*
 * pgd entries used up by user/kernel:
 */

/* NB: The tlb miss handlers make certain assumptions about the order */
/*     of the following bits, so be careful (One example, bits 25-31  */
/*     are moved together in one instruction).                        */

#define _PAGE_READ_BIT     31   /* (0x001) read access allowed */
#define _PAGE_WRITE_BIT    30   /* (0x002) write access allowed */
#define _PAGE_EXEC_BIT     29   /* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT  28   /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT      27   /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT    26   /* (0x020) Page Dirty (D bit) */
#define _PAGE_REFTRAP_BIT  25   /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
#define _PAGE_HPAGE_BIT    21   /* (0x400) Software: Huge Page */
#define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */
#ifdef CONFIG_HUGETLB_PAGE
#define _PAGE_SPECIAL_BIT  _PAGE_DMB_BIT  /* DMB feature is currently unused */
#else
#define _PAGE_SPECIAL_BIT  _PAGE_HPAGE_BIT /* use unused HUGE PAGE bit */
#endif

/* N.B. The bits are defined in terms of a 32 bit word above, so the */
/*      following macro is ok for both 32 and 64 bit.                */

#define xlate_pabit(x) (31 - (x))
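
/* E.g. xlate_pabit(_PAGE_READ_BIT) == 31 - 31 == 0, so _PAGE_READ
 * below is 1 << 0 == 0x001, matching the hex values noted next to the
 * *_BIT definitions above.
 */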

/* This defines the shift to the usable bits in the PTE. It is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero. */
#define PTE_SHIFT	   	xlate_pabit(_PAGE_USER_BIT)

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT		12

#define _PAGE_READ     (1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE    (1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW       (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC     (1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY  (1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB      (1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY    (1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP  (1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_HUGE     (1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_SPECIAL  (1 << xlate_pabit(_PAGE_SPECIAL_BIT))

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
#define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_KERNEL_EXEC | _PAGE_WRITE)
#define _PAGE_KERNEL		(_PAGE_KERNEL_RO | _PAGE_WRITE)

/* We borrow bit 23 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	_PAGE_ACCESSED
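/* Reusing _PAGE_ACCESSED here is safe because swap PTEs are never
 * pte_present(); it appears as the "E" bit in the swap PTE layout
 * documented further below.
 */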

/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the PAGE_OFFSET bits, except
 * for a few meta-information bits, so we shift the address to be
 * able to effectively address 40/42/44-bits of physical address space
 * depending on 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT   31
#define _PxD_VALID_BIT     30

#define PxD_FLAG_PRESENT  (1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK     (0xf)
#define PxD_FLAG_SHIFT    (4)
#define PxD_VALUE_SHIFT   (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
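
/* Decode sketch: pmd/pgd entries below store (phys >> PxD_VALUE_SHIFT)
 * with flags in the low PxD_FLAG_SHIFT bits; with 4k pages
 * (PxD_VALUE_SHIFT == 12 - 4 == 8) a 32-bit entry thus reaches
 * 32 + 8 = 40 bits of physical address space, as the comment above
 * says.
 */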

#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
/* Others seem to make this executable, I don't know if that's correct
   or not.  The stack is mapped this way though so this is necessary
   in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
#define PAGE_COPY       PAGE_EXECREAD
#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY | _PAGE_READ)

/*
 * We could have an execute-only page using "gateway - promote to priv
 * level 3", but that is kind of silly. So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission. For the fun of it we'll go ahead and support write-only
 * pages.
 */

	 /*xwr*/

extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */
extern pte_t pg0[];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)     (pte_val(x) == 0)
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_user(x)	(pte_val(x) & _PAGE_USER)
#define pte_clear(mm, addr, xp)  set_pte(xp, __pte(0))

#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)	((unsigned long)(pmd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pud_flag(x)	(pud_val(x) & PxD_FLAG_MASK)
#define pud_address(x)	((unsigned long)(pud_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)	((unsigned long)(pgd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd)
{
	set_pmd(pmd, __pmd(0));
}

#if CONFIG_PGTABLE_LEVELS == 3
#define pud_pgtable(pud) ((pmd_t *) __va(pud_address(pud)))
#define pud_page(pud)	virt_to_page((void *)pud_pgtable(pud))

/* For 64 bit we have three level tables */

#define pud_none(x)     (!pud_val(x))
#define pud_bad(x)      (!(pud_flag(x) & PxD_FLAG_VALID))
#define pud_present(x)  (pud_flag(x) & PxD_FLAG_PRESENT)
static inline void pud_clear(pud_t *pud)
{
	set_pud(pud, __pud(0));
}
#endif

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }

static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite_novma(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }

/*
 * Huge pte definitions.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte)           (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte)         (__pte(pte_val(pte) | \
				 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte)           (0)
#define pte_mkhuge(pte)         (pte)
#endif
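
/* Note: when parisc_requires_coherency() is true, pte_mkhuge() above
 * deliberately leaves _PAGE_HUGE clear, so those machines effectively
 * fall back to small-page mappings.
 */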

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr,pgprot) \
({									\
	pte_t __pte;							\
									\
	pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));	\
									\
	__pte;								\
})
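
/* __mk_pte(addr, pgprot) is effectively pfn_pte(addr >> PAGE_SHIFT,
 * pgprot): the physical address becomes the PFN field above
 * PFN_PTE_SHIFT, with the protection bits below it.
 */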

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

/* Permanent address of a page.  On parisc we don't have highmem. */
#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long) __va(pmd_address(pmd)));
}

#define pmd_pfn(pmd)	(pmd_address(pmd) >> PAGE_SHIFT)
#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))

/* Find an entry in the second-level page table. */

extern void paging_init (void);

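/* set_ptes() fills nr consecutive PTEs for one physically contiguous
 * range: each step advances the PFN field by one page (adding
 * 1 << PFN_PTE_SHIFT) and purges that page's TLB entries; the cache
 * update is done once up front with the first PTE of the range.
 */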
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	if (pte_present(pte) && pte_user(pte))
		__update_cache(pte);
	for (;;) {
		*ptep = pte;
		purge_tlb_entries(mm, addr);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pte) += 1 << PFN_PTE_SHIFT;
		addr += PAGE_SIZE;
	}
}
#define set_ptes set_ptes

/* Used for deferring calls to flush_dcache_page() */
#define PG_dcache_dirty         PG_arch_1

#define update_mmu_cache_range(vmf, vma, addr, ptep, nr) __update_cache(*ptep)
#define update_mmu_cache(vma, addr, ptep) __update_cache(*ptep)

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs (32bit):
 *
 *                         1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *   <---------------- offset -----------------> P E <ofs> < type ->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   _PAGE_PRESENT (P) must be 0.
 *
 *   For the 64bit version, the offset is extended by 32bit.
 */
#define __swp_type(x)			((x).val & 0x1f)
#define __swp_offset(x)			((((x).val >> 5) & 0x7) | \
					 (((x).val >> 10) << 3))
#define __swp_entry(type, offset)	((swp_entry_t) { \
					  ((type) & 0x1f) | \
					  (((offset) & 0x7) << 5) | \
					  (((offset) >> 3) << 10) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
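
/* Round-trip sketch: __swp_entry(1, 0x10) yields val = 1 | (0x0 << 5) |
 * (0x2 << 10) = 0x801; __swp_type() recovers 1 and __swp_offset()
 * recovers 0x0 | (0x2 << 3) = 0x10. Value bits 8 and 9 stay free for
 * the E and P bits of the layout above.
 */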

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

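/* The second pte_young() test below is not redundant: *ptep can change
 * between the quick check and the read into pte (nothing is locked
 * here), and only the captured copy is validated and written back.
 */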
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	if (!pte_young(*ptep))
		return 0;

	pte = *ptep;
	if (!pte_young(pte))
		return 0;

	set_pte(ptep, pte_mkold(pte));
	return 1;
}

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;

	old_pte = *ptep;
	set_pte(ptep, __pte(0));

	return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte(ptep, pte_wrprotect(*ptep));
}

#define pte_same(A,B)	(pte_val(A) == pte_val(B))

#endif /* !__ASSEMBLY__ */

/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K		0
#define _PAGE_SIZE_ENCODING_16K		1
#define _PAGE_SIZE_ENCODING_64K		2
#define _PAGE_SIZE_ENCODING_256K	3
#define _PAGE_SIZE_ENCODING_1M		4
#define _PAGE_SIZE_ENCODING_4M		5
#define _PAGE_SIZE_ENCODING_16M		6
#define _PAGE_SIZE_ENCODING_64M		7

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif

#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)

/* We provide our own get_unmapped_area to provide cache coherency */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME

#endif /* _PARISC_PGTABLE_H */