/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
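
/*
 * Illustrative sketch only (example numbers, not definitions the kernel
 * relies on): with 4K pages the macros above split a 32-bit virtual
 * address as
 *
 *	pgd_index = va >> PGDIR_SHIFT;
 *	pte_index = (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 *	offset    = va & (PAGE_SIZE - 1);
 *
 * For 32-bit PTEs (PTE_SHIFT = 10) PGDIR_SHIFT is 22, giving a
 * 1024-entry pgdir and 1024-entry PTE pages; for 64-bit PTEs
 * (PTE_SHIFT = 9) PGDIR_SHIFT is 21, giving a 2048-entry (8KB) pgdir
 * and 512-entry PTE pages.  E.g. va = 0xc0345678 with 32-bit PTEs
 * yields pgd_index = 0x300, pte_index = 0x345, offset = 0x678.
 */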

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
 */

#define FIXADDR_SIZE	0
#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP	(KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
#endif

/*
 * ioremap_bot starts at IOREMAP_TOP (defined below). Early ioremaps move
 * down from there, until mem_init() at which point this becomes the top
 * of the vmalloc and ioremap space.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END
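
/*
 * Rough picture of the resulting layout, top down (boundaries are
 * configuration dependent and only indicative here):
 *
 *	FIXADDR_TOP	fixmap; the KASAN shadow sits above it when enabled
 *	IOREMAP_TOP	PKMAP_BASE with HIGHMEM, FIXADDR_START otherwise
 *	ioremap_bot	early ioremaps grow down from IOREMAP_TOP to here;
 *			after mem_init() this is the top of vmalloc space
 *	VMALLOC_START	VMALLOC_OFFSET above high_memory (see below)
 */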

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from IOREMAP_TOP being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif
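
/*
 * Worked example with assumed numbers (not definitions): with 512MB of
 * lowmem mapped at the usual 0xc0000000 kernel offset, high_memory is
 * 0xe0000000, so without PPC_PIN_SIZE
 *
 *	VMALLOC_START = (0xe0000000 + 0x1000000) & ~(0x1000000 - 1)
 *	              = 0xe1000000
 *
 * i.e. vmalloc space begins at least VMALLOC_OFFSET above lowmem and is
 * aligned to a VMALLOC_OFFSET boundary.
 */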

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-e500.h>
#elif defined(CONFIG_PPC_85xx)
#include <asm/nohash/32/pte-85xx.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * as _PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that differ pre-define the value themselves, so we don't
 * override it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
#endif

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
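
/*
 * Sketch of how the RPN field is used (example numbers only; the kernel's
 * pte_pfn()/pfn_pte() helpers do the real conversion elsewhere): with
 * 64-bit PTEs a 36-bit physical address such as 0x2_34567000 is stored
 * directly in the RPN, so
 *
 *	pfn = (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT
 *	    = 0x234567000 >> 12 = 0x234567
 *
 * which is why MAX_POSSIBLE_PHYSMEM_BITS is 36 with CONFIG_PTE_64BIT
 * and 32 otherwise.
 */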

#ifndef __ASSEMBLY__

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_pfn(pmd)		(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((const void *)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_pfn(pmd)		(__pa(pmd_val(pmd)) >> PAGE_SHIFT)
#endif

#define pmd_page(pmd)		pfn_to_page(pmd_pfn(pmd))
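
/*
 * Illustrative sketch of the difference described above (not a helper
 * defined by this header): given a pmd entry for a lowmem mapping,
 *
 *	Book E:	pte table vaddr = pmd_val(pmd) & ~PMD_MASKED_BITS
 *	others:	pte table vaddr = __va(pmd_val(pmd) & ~PMD_MASKED_BITS)
 *
 * which is also why pmd_pfn() needs __pa() only in the Book E case.
 */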

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs (32bit PTEs):
 *
 *                         1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *   <------------------ offset -------------------> < type -> E 0 0
 *
 * E is the exclusive marker that is not stored in swap entries.
 *
 * For 64bit PTEs, the offset is extended by 32bit.
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) & 0x1f) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

/* We borrow LSB 2 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	0x000004
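
/*
 * Worked example (illustrative numbers only): __swp_entry(2, 0x100)
 * gives val = 0x2 | (0x100 << 5) = 0x2002, and __swp_entry_to_pte()
 * turns that into pte_val = 0x2002 << 3 = 0x10010.  The low two bits
 * stay zero, so the result is !pte_present(), and bit 2
 * (_PAGE_SWP_EXCLUSIVE = 0x4) remains free for the exclusive marker.
 */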

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */