/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm/pgtable.h:  Defines and functions used to work
 *                        with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#define PMD_SHIFT		18
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)

#define PGDIR_SHIFT		24
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(__addr)	(((__addr) + ~PGDIR_MASK) & PGDIR_MASK)
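
/* A quick worked example of the align-up macros above (illustrative
 * values only, not new definitions): with PMD_SHIFT == 18, PMD_SIZE is
 * 0x40000 and ~PMD_MASK is 0x3ffff, so
 *
 *	PMD_ALIGN(0x40000) == 0x40000	(already aligned)
 *	PMD_ALIGN(0x40001) == 0x80000	(rounded up to the next 256 KB)
 *
 * PGDIR_ALIGN() behaves the same way with a 16 MB (1 << 24) granule.
 */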

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable-nopud.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>

struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

#define PTRS_PER_PTE		64
#define PTRS_PER_PMD		64
#define PTRS_PER_PGD		256
#define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)
#define PTE_SIZE		(PTRS_PER_PTE*4)
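
/* Sanity check on the geometry above (illustrative arithmetic only):
 * each PTE table has 64 four-byte entries (PTE_SIZE == 256 bytes) and,
 * with sparc32's 4 KB pages, maps 64 * 4 KB = 256 KB, which is exactly
 * PMD_SIZE (1 << 18).  Each PMD table maps 64 * 256 KB = 16 MB ==
 * PGDIR_SIZE (1 << 24), and the 256-entry PGD covers 256 * 16 MB,
 * i.e. the full 4 GB address space.
 */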

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized). */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/* The first physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page(pmd_pfn(pmd));
}

static inline unsigned long __pmd_page(pmd_t pmd)
{
	unsigned long v;

	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();

	v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	unsigned long v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	if (srmmu_device_memory(pud_val(pud))) {
		return (pmd_t *)~0;
	} else {
		unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
		return (pmd_t *)__nocache_va(v << 4);
	}
}
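
/* The "<< 4" above undoes the SRMMU page-table-descriptor encoding: a
 * PTD stores the physical address of the next-level table shifted
 * right by four, with the entry type in the low bits.  A rough worked
 * example (illustrative address, not taken from real hardware): a PMD
 * table at physical 0x01234500 is stored in the PTD as
 * (0x01234500 >> 4) | SRMMU_ET_PTD == 0x00123450 | 0x1, and masking
 * with SRMMU_PTD_PMASK then shifting left by four recovers 0x01234500.
 */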

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pte((pte_t *)&pmd_val(*pmdp), __pte(0));
}

static inline int pud_none(pud_t pud)
{
	return !(pud_val(pud) & 0xFFFFFFF);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pud_present(pud_t pud)
{
	return ((pud_val(pud) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pte((pte_t *)pudp, __pte(0));
}
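
/* Note that pmd_clear()/pud_clear() go through set_pte() rather than
 * plain stores, so directory entries are cleared with the same atomic
 * swap that PTE updates use (see srmmu_swap() above).
 */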

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define PFN_PTE_SHIFT			(PAGE_SHIFT - 4)
#define pfn_pte(pfn, prot)		mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> PFN_PTE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
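
/* Worked example for the conversions above (illustrative numbers): a
 * physical page at 0x12345000 packs into the PTE as 0x12345000 >> 4 ==
 * 0x01234500, leaving the low bits free for the protection value.  For
 * I/O, mk_pte_io() additionally folds a 4-bit iospace into bits 31:28,
 * so space 2 contributes 0x20000000 to the same entry.
 */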

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

/* only used by the huge vmap code, should never be called */
#define pud_page(pud)			NULL

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)
#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <-------------- offset ---------------> < type -> E 0 0 0 0 0 0
 */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}
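
/* Reading the layout diagram above: the offset lives in bits 31:12,
 * the type in bits 11:7 and the exclusive bit E in bit 6, so (as an
 * illustrative example) __swp_entry(2, 0x123) builds
 * (0x123 << 12) | (2 << 7) == 0x00123100, and __swp_type()/
 * __swp_offset() mask and shift the same fields back out.
 */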

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & SRMMU_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_SWP_EXCLUSIVE);
}
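
/* SRMMU_SWP_EXCLUSIVE is the E bit in the layout diagram above; it
 * records anon-exclusive ownership of the page across swap-out and
 * swap-in.
 */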

static inline unsigned long __get_phys(unsigned long addr)
{
	switch (sparc_cpu_model) {
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte(addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int __get_iospace(unsigned long addr)
{
	switch (sparc_cpu_model) {
	case sun4m:
	case sun4d:
		return (srmmu_get_pte(addr) >> 28);
	default:
		return -1;
	}
}

/*
 * For both sparc32 and sparc64, the pfn in io_remap_pfn_range() carries
 * <iospace> in its high 4 bits.  These macros/functions put it there or
 * get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	((pfn) | ((space) << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		((pfn) >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			((pfn) & 0x0fffffffUL)
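
/* Worked example (illustrative values; a 32-bit long is assumed, so
 * the iospace sits in bits 31:28): MK_IOSPACE_PFN(2, 0x1234) yields
 * 0x20001234, GET_IOSPACE(0x20001234) recovers space 2, and
 * GET_PFN(0x20001234) recovers pfn 0x1234.
 */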

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte(__ptep, __entry);				  \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#define pmd_pgtable(pmd)	((pgtable_t)__pmd_page(pmd))

#endif /* !(_SPARC_PGTABLE_H) */