/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT - 3))
#endif
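
/*
 * Each table level holds PAGE_SIZE / 8 eight-byte entries, so every
 * level contributes (PAGE_SHIFT - 3) address bits. For example, with
 * 16KB pages (PAGE_SHIFT == 14) and three levels this yields
 * PMD_SHIFT == 25, PGDIR_SHIFT == 36 and VA_BITS == 47.
 */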

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT - 3))

#define PTRS_PER_PGD	(PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	(PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	(PAGE_SIZE >> 3)
#endif
#define PTRS_PER_PTE	(PAGE_SIZE >> 3)

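/* Number of PGD entries that cover user space; at least one. */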
#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE) ? (TASK_SIZE64 / PGDIR_SIZE) : 1)

#ifndef __ASSEMBLY__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/sparsemem.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)

/*
 * TLB refill handlers may also map the vmalloc area into xkvrange.
 * Avoid the first couple of pages so NULL pointer dereferences will
 * still reliably trap.
 */
#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

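/*
 * Reserve room for the KFENCE pool: the generic pool needs
 * (CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 pages (a data page plus a guard
 * page per object); the two extra pages here presumably serve as
 * additional guards around the area.
 */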
#ifdef CONFIG_KFENCE
#define KFENCE_AREA_SIZE	(((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
#else
#define KFENCE_AREA_SIZE	0
#endif

#define VMALLOC_START	MODULES_END

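/*
 * The vmalloc area ends where the mappable range runs out: either the
 * span the page tables can cover or the CPU's virtual address bits,
 * whichever is smaller. With KASAN the usable span is halved to leave
 * room for the shadow mapping. Room is also left at the top for
 * vmemmap and the KFENCE area.
 */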
#ifndef CONFIG_KASAN
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#else
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#endif

#define vmemmap		((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END	((unsigned long)vmemmap + VMEMMAP_SIZE - 1)

#define KFENCE_AREA_START	(VMEMMAP_END + 1)
#define KFENCE_AREA_END		(KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
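
/*
 * Resulting layout above vm_map_base:
 * PCI I/O ports -> modules -> vmalloc -> vmemmap -> KFENCE pool.
 */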

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline void p4d_clear(p4d_t *p4dp)
{
	p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	*p4d = p4dval;
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK);
}

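/*
 * A huge pmd is a leaf entry rather than a pointer to a pte table, so
 * its presence is encoded in flag bits instead of a pointer compare;
 * _PAGE_PRESENT_INVALID keeps an invalidated huge pmd visibly present.
 */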
static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
}

#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))

/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
extern void pmd_init(void *addr);

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <--------------------------- offset ---------------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------> E <--- type ---> <---------- zeroes ---------->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0x7f)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x)	((pmd_t) { (x).val | _PAGE_HUGE })
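
/*
 * For example, __swp_entry(1, 2) encodes to (1UL << 16) | (2UL << 24):
 * the type sits in bits 16-22 and the offset from bit 24 upward, while
 * bits 0-15 stay zero so the entry is neither pte_none() nor
 * pte_present().
 */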

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

extern void paging_init(void);

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

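/*
 * A TLB entry maps an even/odd pair of adjacent virtual pages, so the
 * global bit must agree between the two ptes of a pair ("buddies");
 * set_pte() and pte_clear() keep the buddies in sync.
 */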
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		__asm__ __volatile__ (
		"1:"	__LL	"%[tmp], %[buddy]		\n"
		"	bnez	%[tmp], 2f			\n"
		"	 or	%[tmp], %[tmp], %[global]	\n"
			__SC	"%[tmp], %[buddy]		\n"
		"	beqz	%[tmp], 1b			\n"
		"	nop					\n"
		"2:						\n"
		__WEAK_LLSC_MB
		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
		: [global] "r" (page_global));
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte(ptep, __pte(_PAGE_GLOBAL));
	else
		set_pte(ptep, __pte(0));
}

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & (_PAGE_DIRTY | _PAGE_MODIFIED); }

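/*
 * Two dirty bits are kept: _PAGE_DIRTY is the hardware-visible bit that
 * makes a page writable-dirty in the TLB, while _PAGE_MODIFIED is a
 * software bit that remembers dirtiness across write protection (note
 * that pte_wrprotect() below clears only _PAGE_DIRTY).
 */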
static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		__update_tlb(vma, address, ptep);
		if (--nr == 0)
			break;
		address += PAGE_SIZE;
		ptep++;
	}
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb	update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> PFN_PTE_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

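/*
 * Huge page entries keep their global bit at a different position
 * (_PAGE_HGLOBAL), so an existing _PAGE_GLOBAL bit is relocated there
 * before _PAGE_HUGE is set.
 */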
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
				(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
	return pmd;
}

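/*
 * Make a huge pmd transiently invalid (used by the generic
 * pmdp_invalidate()): the present/valid bits are cleared so hardware
 * faults on access, but _PAGE_PRESENT_INVALID keeps pmd_present() true
 * while the huge page is being split.
 */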
static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_PRESENT_INVALID;
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}

/*
 * The generic version of pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

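/*
 * NUMA hinting (PROT_NONE) mappings carry _PAGE_PROTNONE with
 * _PAGE_PRESENT clear, so they fault on access while still counting as
 * present to the core mm (see pte_present() above).
 */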
#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */

#define pmd_leaf(pmd)		((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)		((pud_val(pud) & _PAGE_HUGE) != 0)

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */