// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN for 64-bit Book3e powerpc
 *
 * Copyright 2022, Christophe Leroy, CS GROUP France
 */

#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/set_memory.h>

#include <asm/pgalloc.h>

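/*
 * The early shadow region is backed by shared zero tables set up in
 * kasan_early_init(). These helpers tell whether an entry still points
 * at one of those shared tables, in which case a private table must be
 * allocated before a new shadow PTE can be written.
 */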
static inline bool kasan_pud_table(p4d_t p4d)
{
	return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
}

static inline bool kasan_pmd_table(pud_t pud)
{
	return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
}

static inline bool kasan_pte_table(pmd_t pmd)
{
	return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
}

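/*
 * Map the shadow page at effective address ea to physical address pa,
 * replacing any level that still points at a shared early shadow table
 * with a freshly allocated table seeded from the early one.
 */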
static int __init kasan_map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	p4dp = p4d_offset(pgdp, ea);
	if (kasan_pud_table(*p4dp)) {
		pudp = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
		memcpy(pudp, kasan_early_shadow_pud, PUD_TABLE_SIZE);
		p4d_populate(&init_mm, p4dp, pudp);
	}
	pudp = pud_offset(p4dp, ea);
	if (kasan_pmd_table(*pudp)) {
		pmdp = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
		memcpy(pmdp, kasan_early_shadow_pmd, PMD_TABLE_SIZE);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (kasan_pte_table(*pmdp)) {
		ptep = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
		memcpy(ptep, kasan_early_shadow_pte, PTE_TABLE_SIZE);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

	__set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot), 0);

	return 0;
}

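/*
 * Allocate real shadow memory for the given physical memory range and
 * map it page by page over the corresponding shadow addresses.
 */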
static void __init kasan_init_phys_region(void *start, void *end)
{
	unsigned long k_start, k_end, k_cur;
	void *va;

	if (start >= end)
		return;

	k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE);
	k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE);

	va = memblock_alloc(k_end - k_start, PAGE_SIZE);
	for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE)
		kasan_map_kernel_page(k_cur, __pa(va), PAGE_KERNEL);
}

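/*
 * Populate the whole shadow region with the zero shadow page so that
 * every shadow access is backed before real shadow memory gets
 * allocated in kasan_init().
 */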
void __init kasan_early_init(void)
{
	int i;
	unsigned long addr;
	pgd_t *pgd = pgd_offset_k(KASAN_SHADOW_START);
	pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL);

	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));

	for (i = 0; i < PTRS_PER_PTE; i++)
		__set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
			     &kasan_early_shadow_pte[i], zero_pte, 0);

	for (i = 0; i < PTRS_PER_PMD; i++)
		pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i],
				    kasan_early_shadow_pte);

	for (i = 0; i < PTRS_PER_PUD; i++)
		pud_populate(&init_mm, &kasan_early_shadow_pud[i],
			     kasan_early_shadow_pmd);

	for (addr = KASAN_SHADOW_START; addr != KASAN_SHADOW_END; addr += PGDIR_SIZE)
		p4d_populate(&init_mm, p4d_offset(pgd++, addr), kasan_early_shadow_pud);
}

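/*
 * Replace the early zero shadow with real shadow memory for every
 * memblock range, remap the zero shadow page read-only, and start
 * reporting errors.
 */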
void __init kasan_init(void)
{
	phys_addr_t start, end;
	u64 i;
	pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO);

	for_each_mem_range(i, &start, &end)
		kasan_init_phys_region((void *)start, (void *)end);

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_remove_zero_shadow((void *)VMALLOC_START, VMALLOC_SIZE);

	for (i = 0; i < PTRS_PER_PTE; i++)
		__set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
			     &kasan_early_shadow_pte[i], zero_pte, 0);

	flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);

	memset(kasan_early_shadow_page, 0, PAGE_SIZE);

	/* Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KASAN init done\n");
}

void __init kasan_late_init(void) { }