/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_64_DEFS_H
#define _ASM_X86_PGTABLE_64_DEFS_H

#include <asm/sparsemem.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/kaslr.h>

/*
 * These are used to make use of C type-checking.
 */
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	p4dval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;

typedef struct { pteval_t pte; } pte_t;
typedef struct { pmdval_t pmd; } pmd_t;

extern unsigned int __pgtable_l5_enabled;

#ifdef CONFIG_X86_5LEVEL
#ifdef USE_EARLY_PGTABLE_L5
/*
 * cpu_feature_enabled() is not available in early boot code.
 * Use the variable instead.
 */
static inline bool pgtable_l5_enabled(void)
{
	return __pgtable_l5_enabled;
}
#else
#define pgtable_l5_enabled() cpu_feature_enabled(X86_FEATURE_LA57)
#endif /* USE_EARLY_PGTABLE_L5 */

#else
#define pgtable_l5_enabled() 0
#endif /* CONFIG_X86_5LEVEL */
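
/*
 * Illustrative usage (a sketch, not code from this header): early boot
 * code that runs before CPU feature bits are finalized opts into the
 * variable-backed check by defining USE_EARLY_PGTABLE_L5 before any
 * includes:
 *
 *	#define USE_EARLY_PGTABLE_L5
 *	#include <asm/pgtable_64_types.h>
 *
 *	if (pgtable_l5_enabled())	// reads __pgtable_l5_enabled
 *		...
 *
 * Everyone else simply calls pgtable_l5_enabled() and gets the
 * cpu_feature_enabled(X86_FEATURE_LA57) form.
 */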
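/*
 * For reference: these are set up during early boot. With LA57 enabled,
 * pgdir_shift becomes 48 and ptrs_per_p4d becomes 512; otherwise they
 * keep the 4-level values of 39 and 1.
 */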
extern unsigned int pgdir_shift;
extern unsigned int ptrs_per_p4d;

#endif	/* !__ASSEMBLY__ */

#define SHARED_KERNEL_PMD	0

#ifdef CONFIG_X86_5LEVEL

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT	pgdir_shift
#define PTRS_PER_PGD	512

/*
 * 4th level page in 5-level paging case
 */
#define P4D_SHIFT		39
#define MAX_PTRS_PER_P4D	512
#define PTRS_PER_P4D		ptrs_per_p4d
#define P4D_SIZE		(_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK		(~(P4D_SIZE - 1))

#define MAX_POSSIBLE_PHYSMEM_BITS	52
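
/*
 * Worked example: in the 5-level case PGDIR_SHIFT evaluates to 48 at
 * runtime, so one PGD entry maps 1UL << 48 = 256 TiB and 512 entries
 * cover the 57-bit, 128 PiB virtual address space; each p4d entry below
 * maps P4D_SIZE = 1UL << 39 = 512 GiB. MAX_POSSIBLE_PHYSMEM_BITS of 52
 * matches LA57's 4 PiB physical addressing limit.
 */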

#else /* CONFIG_X86_5LEVEL */

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT		39
#define PTRS_PER_PGD		512
#define MAX_PTRS_PER_P4D	1
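
/*
 * Worked example: one PGD entry maps 1UL << 39 = 512 GiB, and 512
 * entries cover the 48-bit, 256 TiB virtual address space. The p4d
 * level is folded away, hence MAX_PTRS_PER_P4D == 1.
 */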

#endif /* CONFIG_X86_5LEVEL */

/*
 * 3rd level page
 */
#define PUD_SHIFT	30
#define PTRS_PER_PUD	512

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT	21
#define PTRS_PER_PMD	512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE	512

#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PUD_SIZE	(_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))
#define PGDIR_SIZE	(_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
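
/*
 * These work out to the familiar x86-64 mapping sizes: PMD_SIZE is
 * 1UL << 21 = 2 MiB and PUD_SIZE is 1UL << 30 = 1 GiB, the two huge
 * page sizes. The masks round an address down to the start of the
 * region its entry maps; hypothetical examples:
 *
 *	addr & PMD_MASK;	// 0x200123456 -> 0x200000000
 *	addr & PUD_MASK;	// 0x25fa00000 -> 0x240000000
 */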

/*
 * See Documentation/arch/x86/x86_64/mm.rst for a description of the memory map.
 *
 * Be very careful vs. KASLR when changing anything here. The KASLR address
 * range must not overlap with anything except the KASAN shadow area, which
 * is correct as KASAN disables KASLR.
 */
#define MAXMEM			(1UL << MAX_PHYSMEM_BITS)

#define GUARD_HOLE_PGD_ENTRY	-256UL
#define GUARD_HOLE_SIZE		(16UL << PGDIR_SHIFT)
#define GUARD_HOLE_BASE_ADDR	(GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
#define GUARD_HOLE_END_ADDR	(GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)

#define LDT_PGD_ENTRY		-240UL
#define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
#define LDT_END_ADDR		(LDT_BASE_ADDR + PGDIR_SIZE)
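
/*
 * Worked example, assuming the 4-level layout (PGDIR_SHIFT == 39):
 * GUARD_HOLE_BASE_ADDR = -256UL << 39 = 0xffff800000000000, the very
 * start of the kernel half, and GUARD_HOLE_SIZE = 16UL << 39 = 8 TiB,
 * so the hole ends at 0xffff880000000000, exactly where LDT_BASE_ADDR
 * (-240UL << 39) begins; the LDT remap then takes one PGD entry
 * (512 GiB). With 5-level paging the same expressions scale up with
 * PGDIR_SHIFT == 48.
 */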

#define __VMALLOC_BASE_L4	0xffffc90000000000UL
#define __VMALLOC_BASE_L5	0xffa0000000000000UL

#define VMALLOC_SIZE_TB_L4	32UL
#define VMALLOC_SIZE_TB_L5	12800UL

#define __VMEMMAP_BASE_L4	0xffffea0000000000UL
#define __VMEMMAP_BASE_L5	0xffd4000000000000UL
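
/*
 * For scale: the 4-level layout gives vmalloc 32 TiB starting at
 * 0xffffc90000000000; the 5-level layout gives it 12800 TiB (12.5 PiB)
 * starting at 0xffa0000000000000. The vmemmap bases reserve room for a
 * struct page for every possible physical page.
 */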

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
# define VMALLOC_START		vmalloc_base
# define VMALLOC_SIZE_TB	(pgtable_l5_enabled() ? VMALLOC_SIZE_TB_L5 : VMALLOC_SIZE_TB_L4)
# define VMEMMAP_START		vmemmap_base
#else
# define VMALLOC_START		__VMALLOC_BASE_L4
# define VMALLOC_SIZE_TB	VMALLOC_SIZE_TB_L4
# define VMEMMAP_START		__VMEMMAP_BASE_L4
#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
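
/*
 * With CONFIG_DYNAMIC_MEMORY_LAYOUT (used for memory KASLR and for
 * 5-level paging), vmalloc_base and vmemmap_base are runtime variables:
 * they start out at the L4 defaults and are switched to the L5 values
 * early in boot and/or randomized by kernel_randomize_memory().
 */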

/*
 * End of the region for which vmalloc page tables are pre-allocated.
 * For non-KMSAN builds, this is the same as VMALLOC_END.
 * For KMSAN builds, VMALLOC_START..VMEMORY_END is 4 times bigger than
 * VMALLOC_START..VMALLOC_END (see below).
 */
#define VMEMORY_END		(VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1)
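
/*
 * The "<< 40" converts the TiB count to bytes. In the static 4-level
 * layout, for instance, VMEMORY_END = 0xffffc90000000000 + 32 TiB - 1
 * = 0xffffe8ffffffffff, leaving a hole below the vmemmap base.
 */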

#ifndef CONFIG_KMSAN
#define VMALLOC_END		VMEMORY_END
#else
/*
 * In KMSAN builds the vmalloc area is four times smaller, and the
 * remaining 3/4 are used to keep the metadata for virtual pages. The
 * memory formerly belonging to the vmalloc area is now laid out as
 * follows:
 *
 * 1st quarter: VMALLOC_START to VMALLOC_END - new vmalloc area
 * 2nd quarter: KMSAN_VMALLOC_SHADOW_START to
 *              VMALLOC_END+KMSAN_VMALLOC_SHADOW_OFFSET - vmalloc area shadow
 * 3rd quarter: KMSAN_VMALLOC_ORIGIN_START to
 *              VMALLOC_END+KMSAN_VMALLOC_ORIGIN_OFFSET - vmalloc area origins
 * 4th quarter: KMSAN_MODULES_SHADOW_START to KMSAN_MODULES_ORIGIN_START
 *              - shadow for modules,
 *              KMSAN_MODULES_ORIGIN_START to
 *              KMSAN_MODULES_ORIGIN_START + MODULES_LEN - origins for modules.
 */
#define VMALLOC_QUARTER_SIZE	((VMALLOC_SIZE_TB << 40) >> 2)
#define VMALLOC_END		(VMALLOC_START + VMALLOC_QUARTER_SIZE - 1)
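
/*
 * Worked example with the static 4-level numbers: 32 TiB splits into
 * four 8 TiB quarters, so VMALLOC_END becomes VMALLOC_START + 8 TiB - 1
 * and usable vmalloc space shrinks to a quarter of its normal size.
 */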

/*
 * vmalloc metadata addresses are calculated by adding shadow/origin offsets
 * to the vmalloc address.
 */
#define KMSAN_VMALLOC_SHADOW_OFFSET	VMALLOC_QUARTER_SIZE
#define KMSAN_VMALLOC_ORIGIN_OFFSET	(VMALLOC_QUARTER_SIZE << 1)

#define KMSAN_VMALLOC_SHADOW_START	(VMALLOC_START + KMSAN_VMALLOC_SHADOW_OFFSET)
#define KMSAN_VMALLOC_ORIGIN_START	(VMALLOC_START + KMSAN_VMALLOC_ORIGIN_OFFSET)
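
/*
 * A hypothetical sketch of the lookup (the real one lives in mm/kmsan/):
 *
 *	static void *vmalloc_shadow(void *addr)
 *	{
 *		return (void *)((unsigned long)addr +
 *				KMSAN_VMALLOC_SHADOW_OFFSET);
 *	}
 *
 * Because the metadata sits at a fixed offset, no lookup table is
 * needed for vmalloc addresses.
 */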

/*
 * The shadow and origin ranges for modules are placed back to back in
 * the last 1/4 of the vmalloc space.
 */
#define KMSAN_MODULES_SHADOW_START	(VMALLOC_END + KMSAN_VMALLOC_ORIGIN_OFFSET + 1)
#define KMSAN_MODULES_ORIGIN_START	(KMSAN_MODULES_SHADOW_START + MODULES_LEN)
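
/*
 * That is, module metadata begins right after the origin quarter: with
 * the 4-level numbers, KMSAN_MODULES_SHADOW_START works out to
 * VMALLOC_START + 24 TiB, the start of the final quarter.
 */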
#endif /* CONFIG_KMSAN */

#define MODULES_VADDR		(__START_KERNEL_map + KERNEL_IMAGE_SIZE)
/* The module sections end at the start of the fixmap */
#ifndef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
# define MODULES_END		_AC(0xffffffffff000000, UL)
#else
# define MODULES_END		_AC(0xfffffffffe000000, UL)
#endif
#define MODULES_LEN		(MODULES_END - MODULES_VADDR)
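
/*
 * Worked example, assuming __START_KERNEL_map = 0xffffffff80000000 and
 * a KASLR kernel (KERNEL_IMAGE_SIZE = 1 GiB): MODULES_VADDR is
 * 0xffffffffc0000000 and MODULES_LEN is 0xffffffffff000000 -
 * 0xffffffffc0000000 = 1008 MiB.
 */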

#define ESPFIX_PGD_ENTRY	_AC(-2, UL)
#define ESPFIX_BASE_ADDR	(ESPFIX_PGD_ENTRY << P4D_SHIFT)

#define CPU_ENTRY_AREA_PGD	_AC(-4, UL)
#define CPU_ENTRY_AREA_BASE	(CPU_ENTRY_AREA_PGD << P4D_SHIFT)
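
/*
 * Worked example: P4D_SHIFT is 39 here (on 4-level kernels
 * <asm-generic/pgtable-nop4d.h> defines it as PGDIR_SHIFT, also 39), so
 * ESPFIX_BASE_ADDR = -2UL << 39 = 0xffffff0000000000 and
 * CPU_ENTRY_AREA_BASE = -4UL << 39 = 0xfffffe0000000000, each the start
 * of a 512 GiB slot near the top of the address space.
 */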

#define EFI_VA_START		( -4 * (_AC(1, UL) << 30))
#define EFI_VA_END		(-68 * (_AC(1, UL) << 30))
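
/*
 * Note the EFI range is described top-down: EFI_VA_START
 * (-4 GiB = 0xffffffff00000000) lies above EFI_VA_END
 * (-68 GiB = 0xffffffef00000000), leaving 64 GiB that the EFI runtime
 * mapping code allocates downward from the start.
 */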

#define EARLY_DYNAMIC_PAGE_TABLES	64

#define PGD_KERNEL_START	((PAGE_SIZE / 2) / sizeof(pgd_t))
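
/*
 * Worked example: with 4 KiB pages and 8-byte pgd_t entries this is
 * (4096 / 2) / 8 = 256, i.e. the kernel half of the PGD begins at index
 * 256, the first entry covering addresses with the top virtual address
 * bit set.
 */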

/*
 * We borrow bit 3 (_PAGE_PWT) to remember PG_anon_exclusive; in a swap
 * pte the hardware caching bits are not consulted, so the bit is free.
 */
#define _PAGE_SWP_EXCLUSIVE	_PAGE_PWT

#endif /* _ASM_X86_PGTABLE_64_DEFS_H */