/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page table support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

/*
 * Page table definitions for Qualcomm Hexagon processor.
 */
#include <asm/page.h>
#include <asm-generic/pgtable-nopmd.h>

/* A handy thing to have if one has the RAM. Declared in head.S */
extern unsigned long empty_zero_page;

/*
 * The PTE model described here is that of the Hexagon Virtual Machine,
 * which autonomously walks 2-level page tables.  At a lower level, we
 * also describe the RISCish software-loaded TLB entry structure of
 * the underlying Hexagon processor. A kernel built to run on the
 * virtual machine has no need to know about the underlying hardware.
 */
#include <asm/vm_mmu.h>

/*
 * To maximize the comfort level for the PTE manipulation macros,
 * define the "well known" architecture-specific bits.
 */
#define _PAGE_READ	__HVM_PTE_R
#define _PAGE_WRITE	__HVM_PTE_W
#define _PAGE_EXECUTE	__HVM_PTE_X
#define _PAGE_USER	__HVM_PTE_U

/*
 * We have a total of 4 "soft" bits available in the abstract PTE.
 * The two mandatory software bits are Dirty and Accessed.
 * To make nonlinear swap work according to the more recent
 * model, we want a low order "Present" bit to indicate whether
 * the PTE describes MMU programming or swap space.
 */
#define _PAGE_PRESENT	(1<<0)
#define _PAGE_DIRTY	(1<<1)
#define _PAGE_ACCESSED	(1<<2)
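
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * the low-order "Present" bit is what separates a live translation from
 * swap information, so a predicate distinguishing the two cases could
 * look like this.
 */
static inline int __example_pte_is_swap(pte_t pte)
{
	/* non-empty (not the all-zero null PTE), but not present */
	return pte_val(pte) != 0 && !(pte_val(pte) & _PAGE_PRESENT);
}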

/*
 * For now, let's say that Valid and Present are the same thing.
 * Alternatively, we could say that it's the "or" of R, W, and X
 * permissions.
 */
#define _PAGE_VALID	_PAGE_PRESENT

/*
 * We're not defining _PAGE_GLOBAL here, since there's no concept
 * of global pages or ASIDs exposed to the Hexagon Virtual Machine,
 * and we want to use the same page table structures and macros in
 * the native kernel as we do in the virtual machine kernel.
 * So we'll put up with a bit of inefficiency for now...
 */

/* We borrow bit 6 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	(1<<6)

/*
 * Top "FOURTH" level (pgd), which for the Hexagon VM is really
 * only the second from the bottom, pgd and pud both being collapsed.
 * Each entry represents 4MB of virtual address space, so a 4KB table
 * maps the full 4GB.
 */
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024

#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
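
/*
 * Arithmetic sketch (illustrative): PGDIR_SIZE is 1UL << 22 == 4MB per
 * pgd entry, and PTRS_PER_PGD == 1024 entries, so 1024 * 4MB covers the
 * whole 4GB 32-bit address space.  A hypothetical index helper (the
 * real pgd_index() comes from the generic pgtable code) would be:
 *
 *	static inline unsigned long example_pgd_index(unsigned long addr)
 *	{
 *		return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
 *	}
 */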

#ifdef CONFIG_PAGE_SIZE_4KB
#define PTRS_PER_PTE 1024
#endif

#ifdef CONFIG_PAGE_SIZE_16KB
#define PTRS_PER_PTE 256
#endif

#ifdef CONFIG_PAGE_SIZE_64KB
#define PTRS_PER_PTE 64
#endif

#ifdef CONFIG_PAGE_SIZE_256KB
#define PTRS_PER_PTE 16
#endif

#ifdef CONFIG_PAGE_SIZE_1MB
#define PTRS_PER_PTE 4
#endif

/*  Any bigger and the PTE disappears.  */
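
/*
 * Worked arithmetic (illustrative): each PTRS_PER_PTE value above is
 * PGDIR_SIZE divided by the configured page size, e.g. 4MB / 4KB == 1024
 * and 4MB / 1MB == 4, so one second-level table always spans exactly one
 * pgd entry.
 */
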
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__,\
		pgd_val(e))

/*
 * Page Protection Constants. Includes (in this variant) cache attributes.
 */
extern unsigned long _dflt_cache_att;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
				_dflt_cache_att)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
				_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY	PAGE_READONLY
#define PAGE_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
				_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY_EXEC	PAGE_EXEC
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
				_PAGE_EXECUTE | _PAGE_WRITE | _dflt_cache_att)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				_PAGE_WRITE | _PAGE_EXECUTE | _dflt_cache_att)


/*
 * Aliases for mapping mmap() protection bits to page protections.
 * These get used for static initialization, so using the _dflt_cache_att
 * variable for the default cache attribute isn't workable. If the
 * default gets changed at boot time, the boot option code has to
 * update data structures like the protection_map[] array.
 */
#define CACHEDEF	(CACHE_DEFAULT << 6)
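
/*
 * Illustrative sketch (assumed usage; the real protection_map[] entries
 * live in the arch mm code, not here): an entry built from CACHEDEF
 * would look something like
 *
 *	[VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ |
 *			     CACHEDEF),
 *
 * which is why the compile-time CACHEDEF constant is used instead of
 * the boot-time _dflt_cache_att variable.
 */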

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];  /* located in head.S */

/*  HUGETLB not working currently  */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
#endif

/*
 * For now, assume that higher-level code will do TLB/MMU invalidations
 * and don't insert that overhead into this low-level function.
 */
extern void sync_icache_dcache(pte_t pte);

#define pte_present_exec_user(pte) \
	((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
	(_PAGE_EXECUTE | _PAGE_USER))

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	/*  should really be using pte_exec, if it weren't declared later. */
	if (pte_present_exec_user(pteval))
		sync_icache_dcache(pteval);

	*ptep = pteval;
}
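
/*
 * Usage sketch (hypothetical; ptep and pfn are assumed to be set up by
 * the caller, and pfn_pte() is defined further down in this header):
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_EXEC);
 *
 *	set_pte(ptep, pte);
 *
 * Because the entry is user-visible and executable, set_pte() calls
 * sync_icache_dcache() before storing it; any TLB invalidation is still
 * the caller's job, as noted above.
 */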

/*
 * For the Hexagon Virtual Machine MMU (or its emulation), a null/invalid
 * L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE
 * (Linux PTE), the key is to have bits 11..9 all zero.  We'd use 0x7
 * as a universal null entry, but some of those least significant bits
 * are interpreted by software.
 */
#define _NULL_PMD	0x7
#define _NULL_PTE	0x0

static inline void pmd_clear(pmd_t *pmd_entry_ptr)
{
	pmd_val(*pmd_entry_ptr) = _NULL_PMD;
}

/*
 * Conveniently, a null PTE value is invalid.
 */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep)
{
	pte_val(*ptep) = _NULL_PTE;
}

/**
 * pmd_none - check if pmd entry is mapped
 * @pmd:  pmd entry
 *
 * MIPS checks it against that "invalid pte table" thing.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _NULL_PMD;
}

/**
 * pmd_present - is there a page table behind this?
 * Essentially the inverse of pmd_none.  We maybe
 * save an inline instruction by defining it this
 * way, instead of simply "!pmd_none".
 */
static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long)_NULL_PMD;
}

/**
 * pmd_bad - check if a PMD entry is "bad". That might mean swapped out.
 * As we have no known cause of badness, it's null, as it is for many
 * architectures.
 */
static inline int pmd_bad(pmd_t pmd)
{
	return 0;
}

/*
 * pmd_pfn - converts a PMD entry to a page frame number
 */
#define pmd_pfn(pmd)  (pmd_val(pmd) >> PAGE_SHIFT)

/*
 * pmd_page - converts a PMD entry to a page pointer
 */
#define pmd_page(pmd)  (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

/**
 * pte_none - check if pte is mapped
 * @pte: pte_t entry
 */
static inline int pte_none(pte_t pte)
{
	return pte_val(pte) == _NULL_PTE;
}

/*
 * pte_present - check if page is present
 */
static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/* mk_pte - make a PTE out of a page pointer and protection bits */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

/* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
#define pte_page(x) pfn_to_page(pte_pfn(x))

/* pte_mkold - mark PTE as not recently accessed */
static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

/* pte_mkyoung - mark PTE as recently accessed */
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

/* pte_mkclean - mark page as in sync with backing store */
static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return pte;
}

/* pte_mkdirty - mark page as modified */
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

/* pte_young - "is PTE marked as accessed?" */
static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

/* pte_dirty - "is PTE dirty?" */
static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

/* pte_modify - set protection bits on PTE */
static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= PAGE_MASK;
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}
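
/*
 * Illustrative use (hypothetical): downgrading a mapping keeps the page
 * frame number and replaces the protection/cache bits, e.g.
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 */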

/* pte_wrprotect - mark page as not writable */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	return pte;
}

/* pte_mkwrite_novma - mark page as writable */
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}

/* pte_mkexec - mark PTE as executable */
static inline pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= _PAGE_EXECUTE;
	return pte;
}

/* pte_read - "is PTE marked as readable?" */
static inline int pte_read(pte_t pte)
{
	return pte_val(pte) & _PAGE_READ;
}

/* pte_write - "is PTE marked as writable?" */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}


/* pte_exec - "is PTE marked as executable?" */
static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXECUTE;
}

/* __pte_to_swp_entry - extract swap entry from PTE */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })

/* __swp_entry_to_pte - extract PTE from swap entry */
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

#define PFN_PTE_SHIFT	PAGE_SHIFT
/* pfn_pte - convert page number and protection value to page table entry */
#define pfn_pte(pfn, pgprot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(pgprot))

/* pte_pfn - convert pte to page frame number */
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)

#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
}

/* ZERO_PAGE - returns the globally shared zero page */
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Swap/file PTE definitions.  If _PAGE_PRESENT is zero, the rest of the PTE is
 * interpreted as swap information.  The remaining free bits are interpreted as
 * listed below.  Rather than have the TLB fill handler test
 * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to
 * all zeros for swap entries, which speeds up the miss handler at the cost of
 * 3 bits of offset.  That trade-off can be revisited if necessary, but Hexagon
 * processor architecture and target applications suggest a lot of TLB misses
 * and not much swap space.
 *
 * Format of swap PTE:
 *	bit	0:	Present (zero)
 *	bits	1-5:	swap type (arch independent layer uses 5 bits max)
 *	bit	6:	exclusive marker
 *	bits	7-9:	bits 2:0 of offset
 *	bits	10-12:	effectively _PAGE_PROTNONE (all zero)
 *	bits	13-31:	bits 21:3 of swap offset
 *
 * The split offset makes some of the following macros a little gnarly,
 * but there's plenty of precedent for this sort of thing.
 */

/* Used for swap PTEs */
#define __swp_type(swp_pte)		(((swp_pte).val >> 1) & 0x1f)

#define __swp_offset(swp_pte) \
	((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x3ffff8))

#define __swp_entry(type, offset) \
	((swp_entry_t)	{ \
		(((type & 0x1f) << 1) | \
		 ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7)) })
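
/*
 * Worked example (illustrative values): __swp_entry(2, 0x1234) packs
 * (2 << 1) | (0x1230 << 10) | (0x4 << 7) == 0x48c204; __swp_type() then
 * recovers 2, and __swp_offset() reassembles 0x4 | 0x1230 == 0x1234.
 */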

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

#endif /* _ASM_PGTABLE_H */