// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2023 Google LLC
// Author: Ard Biesheuvel <ardb@google.com>

#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/string.h>

#include <asm/memory.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "pi.h"

extern const u8 __eh_frame_start[], __eh_frame_end[];

extern void idmap_cpu_replace_ttbr1(void *pgdir);

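/*
 * Map a segment of the kernel image at its runtime virtual address. start
 * and end are the segment's current (ID mapped) addresses, and va_offset is
 * the distance to its virtual mapping; the upper PAGE_OFFSET bits are masked
 * off before the range is handed to map_range(), which builds the page
 * tables rooted at pg_dir, taking new table pages from *pgd.
 */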
static void __init map_segment(pgd_t *pg_dir, u64 *pgd, u64 va_offset,
			       void *start, void *end, pgprot_t prot,
			       bool may_use_cont, int root_level)
{
	map_range(pgd, ((u64)start + va_offset) & ~PAGE_OFFSET,
		  ((u64)end + va_offset) & ~PAGE_OFFSET, (u64)start,
		  prot, root_level, (pte_t *)pg_dir, may_use_cont, 0);
}

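/*
 * Remove a segment from the page tables by remapping it with an empty
 * pgprot_t, which results in descriptors without the valid bit set.
 */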
static void __init unmap_segment(pgd_t *pg_dir, u64 va_offset, void *start,
				 void *end, int root_level)
{
	map_segment(pg_dir, NULL, va_offset, start, end, __pgprot(0),
		    false, root_level);
}

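/*
 * Create the kernel's virtual mapping in init_pg_dir, switch to it, and
 * finally copy the root table into swapper_pg_dir. When relocations need to
 * be applied or the shadow call stack needs to be patched in, the text
 * segments are mapped writable first and remapped with their final
 * permissions on a second pass.
 */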
static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
{
	bool enable_scs = IS_ENABLED(CONFIG_UNWIND_PATCH_PAC_INTO_SCS);
	bool twopass = IS_ENABLED(CONFIG_RELOCATABLE);
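	/* additional page table pages are allocated right after the root table */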
	u64 pgdp = (u64)init_pg_dir + PAGE_SIZE;
	pgprot_t text_prot = PAGE_KERNEL_ROX;
	pgprot_t data_prot = PAGE_KERNEL;
	pgprot_t prot;

	/*
	 * External debuggers may need to write directly to the text mapping to
	 * install SW breakpoints. Allow this (only) when explicitly requested
	 * with rodata=off.
	 */
	if (arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF))
		text_prot = PAGE_KERNEL_EXEC;

	/*
	 * We only enable the shadow call stack dynamically if we are running
	 * on a system that does not implement PAC or BTI. PAC and SCS provide
	 * roughly the same level of protection, and BTI relies on the PACIASP
	 * instructions serving as landing pads, preventing us from patching
	 * those instructions into something else.
	 */
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) && cpu_has_pac())
		enable_scs = false;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && cpu_has_bti()) {
		enable_scs = false;

		/*
		 * If we have a CPU that supports BTI and a kernel built for
		 * BTI then mark the kernel executable text as guarded pages
		 * now so we don't have to rewrite the page tables later.
		 */
		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);
	}

	/* Map all code read-write on the first pass if needed */
	twopass |= enable_scs;
	prot = twopass ? data_prot : text_prot;

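	/*
	 * Map each segment of the image in turn: kernel text, rodata up to
	 * the init code, init text, init data, and finally data/bss.
	 */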
	map_segment(init_pg_dir, &pgdp, va_offset, _stext, _etext, prot,
		    !twopass, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, __start_rodata,
		    __inittext_begin, data_prot, false, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, __inittext_begin,
		    __inittext_end, prot, false, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, __initdata_begin,
		    __initdata_end, data_prot, false, root_level);
	map_segment(init_pg_dir, &pgdp, va_offset, _data, _end, data_prot,
		    true, root_level);
	dsb(ishst);

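	/* Activate the new kernel mapping by installing init_pg_dir in TTBR1 */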
	idmap_cpu_replace_ttbr1(init_pg_dir);

	if (twopass) {
		if (IS_ENABLED(CONFIG_RELOCATABLE))
			relocate_kernel(kaslr_offset);

		if (enable_scs) {
			scs_patch(__eh_frame_start + va_offset,
				  __eh_frame_end - __eh_frame_start);
			asm("ic ialluis");

			dynamic_scs_is_enabled = true;
		}

		/*
		 * Unmap the text region before remapping it, to avoid
		 * potential TLB conflicts when creating the contiguous
		 * descriptors.
		 */
		unmap_segment(init_pg_dir, va_offset, _stext, _etext,
			      root_level);
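
		/*
		 * Make the updated descriptors visible and discard any stale
		 * TLB entries for the text region before it is remapped with
		 * its final permissions below.
		 */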
		dsb(ishst);
		isb();
		__tlbi(vmalle1);
		isb();

		/*
		 * Remap these segments with different permissions
		 * No new page table allocations should be needed
		 */
		map_segment(init_pg_dir, NULL, va_offset, _stext, _etext,
			    text_prot, true, root_level);
		map_segment(init_pg_dir, NULL, va_offset, __inittext_begin,
			    __inittext_end, text_prot, false, root_level);
	}

	/* Copy the root page table to its final location */
	memcpy((void *)swapper_pg_dir + va_offset, init_pg_dir, PAGE_SIZE);
	dsb(ishst);
	idmap_cpu_replace_ttbr1(swapper_pg_dir);
}

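/*
 * Install a new TTBR0 while setting TCR_EL1.DS: the MMU is disabled around
 * the TTBR0/TCR writes, the TLBs are invalidated, and the MMU is re-enabled
 * afterwards. Placed in .idmap.text so the code keeps executing from the
 * 1:1 mapping across the switch.
 */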
static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
{
	u64 sctlr = read_sysreg(sctlr_el1);
	u64 tcr = read_sysreg(tcr_el1) | TCR_DS;

	asm("	msr	sctlr_el1, %0		;"
	    "	isb				;"
	    "	msr	ttbr0_el1, %1		;"
	    "	msr	tcr_el1, %2		;"
	    "	isb				;"
	    "	tlbi	vmalle1			;"
	    "	dsb	nsh			;"
	    "	isb				;"
	    "	msr	sctlr_el1, %3		;"
	    "	isb				;"
	    ::	"r"(sctlr & ~SCTLR_ELx_M), "r"(ttbr), "r"(tcr), "r"(sctlr));
}

static void __init remap_idmap_for_lpa2(void)
{
	/* clear the bits that change meaning once LPA2 is turned on */
	pteval_t mask = PTE_SHARED;

	/*
	 * We have to clear bits [9:8] in all block or page descriptors in the
	 * initial ID map, as otherwise they will be (mis)interpreted as
	 * physical address bits once we flick the LPA2 switch (TCR.DS). Since
	 * we cannot manipulate live descriptors in that way without creating
	 * potential TLB conflicts, let's create another temporary ID map in a
	 * LPA2 compatible fashion, and update the initial ID map while running
	 * from that.
	 */
	create_init_idmap(init_pg_dir, mask);
	dsb(ishst);
	set_ttbr0_for_lpa2((u64)init_pg_dir);

	/*
	 * Recreate the initial ID map with the same granularity as before.
	 * Don't bother with the FDT, we no longer need it after this.
	 */
	memset(init_idmap_pg_dir, 0,
	       (u64)init_idmap_pg_end - (u64)init_idmap_pg_dir);

	create_init_idmap(init_idmap_pg_dir, mask);
	dsb(ishst);

	/* switch back to the updated initial ID map */
	set_ttbr0_for_lpa2((u64)init_idmap_pg_dir);

	/* wipe the temporary ID map from memory */
	memset(init_pg_dir, 0, (u64)init_pg_end - (u64)init_pg_dir);
}

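/*
 * Create a 1:1 mapping of the FDT in the initial ID map so it can be parsed
 * by the early startup code. Intermediate table pages are taken from the
 * statically allocated ptes[] array.
 */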
static void __init map_fdt(u64 fdt)
{
	static u8 ptes[INIT_IDMAP_FDT_SIZE] __initdata __aligned(PAGE_SIZE);
	u64 efdt = fdt + MAX_FDT_SIZE;
	u64 ptep = (u64)ptes;

	/*
	 * Map up to MAX_FDT_SIZE bytes, but avoid overlap with
	 * the kernel image.
	 */
	map_range(&ptep, fdt, (u64)_text > fdt ? min((u64)_text, efdt) : efdt,
		  fdt, PAGE_KERNEL, IDMAP_ROOT_LEVEL,
		  (pte_t *)init_idmap_pg_dir, false, 0);
	dsb(ishst);
}

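/*
 * C entry point of the early mapping code, called from the startup assembly
 * with the MMU enabled via the initial ID map. Parses the FDT for feature
 * overrides and the KASLR seed, determines the VA size supported by the CPU,
 * and creates the kernel's virtual mapping.
 */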
asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
{
	static char const chosen_str[] __initconst = "/chosen";
	u64 va_base, pa_base = (u64)&_text;
	u64 kaslr_offset = pa_base % MIN_KIMG_ALIGN;
	int root_level = 4 - CONFIG_PGTABLE_LEVELS;
	int va_bits = VA_BITS;
	int chosen;

	map_fdt((u64)fdt);

	/* Clear BSS and the initial page tables */
	memset(__bss_start, 0, (u64)init_pg_end - (u64)__bss_start);

	/* Parse the command line for CPU feature overrides */
	chosen = fdt_path_offset(fdt, chosen_str);
	init_feature_override(boot_status, fdt, chosen);

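	/*
	 * Fall back to the minimum VA size if the CPU does not implement the
	 * 52-bit VA support the kernel was configured for (LVA for 64k pages,
	 * LPA2 otherwise). Without LPA2, one fewer translation level is
	 * needed, so the root of the walk moves down a level.
	 */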
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && !cpu_has_lva()) {
		va_bits = VA_BITS_MIN;
	} else if (IS_ENABLED(CONFIG_ARM64_LPA2) && !cpu_has_lpa2()) {
		va_bits = VA_BITS_MIN;
		root_level++;
	}

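	/* The configured VA size is supported: program it into TCR_EL1.T1SZ */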
	if (va_bits > VA_BITS_MIN)
		sysreg_clear_set(tcr_el1, TCR_T1SZ_MASK, TCR_T1SZ(va_bits));

	/*
	 * The virtual KASLR displacement modulo 2MiB is decided by the
	 * physical placement of the image, as otherwise, we might not be able
	 * to create the early kernel mapping using 2 MiB block descriptors. So
	 * take the low bits of the KASLR offset from the physical address, and
	 * fill in the high bits from the seed.
	 */
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		u64 kaslr_seed = kaslr_early_init(fdt, chosen);

		if (kaslr_seed && kaslr_requires_kpti())
			arm64_use_ng_mappings = true;

		kaslr_offset |= kaslr_seed & ~(MIN_KIMG_ALIGN - 1);
	}

	if (IS_ENABLED(CONFIG_ARM64_LPA2) && va_bits > VA_BITS_MIN)
		remap_idmap_for_lpa2();

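	/*
	 * Map the kernel at KIMAGE_VADDR plus the KASLR displacement;
	 * va_base - pa_base is the offset map_kernel() uses to convert the
	 * image's current addresses into their new virtual addresses.
	 */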
	va_base = KIMAGE_VADDR + kaslr_offset;
	map_kernel(kaslr_offset, va_base - pa_base, root_level);
}