// SPDX-License-Identifier: GPL-2.0

#include <linux/pagewalk.h>
#include <linux/debugfs.h>
#include <linux/ptdump.h>
#include <linux/kasan.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * This is an optimization for the KASAN=y case. Since all KASAN page
 * tables eventually point to the kasan_early_shadow_page, we can call
 * note_page() right away without walking through the lower-level page
 * tables. This saves dozens of seconds (minutes for a 5-level config)
 * when checking for W+X mappings or reading the kernel_page_tables
 * debugfs file.
 */
static inline int note_kasan_page_table(struct mm_walk *walk,
					unsigned long addr)
{
	struct ptdump_state *st = walk->private;

	/* Report the whole shadow table as one PTE-level (level 4) entry. */
	st->note_page(st, addr, 4, pte_val(kasan_early_shadow_pte[0]));

	/* Skip the subtree below this entry; it holds nothing new. */
	walk->action = ACTION_CONTINUE;

	return 0;
}
#endif
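
/*
 * Levels passed to note_page() and effective_prot() run from 0 (PGD)
 * down to 4 (PTE); ptdump_walk_pgd() passes -1 at the end of the walk
 * to flush out the last page.
 */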

static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pgd_t val = READ_ONCE(*pgd);

#if CONFIG_PGTABLE_LEVELS > 4 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 0, pgd_val(val));

	if (pgd_leaf(val)) {
		st->note_page(st, addr, 0, pgd_val(val));
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}
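
/*
 * The p4d, pud and pmd callbacks below mirror ptdump_pgd_entry() one
 * level down each: skip the KASAN early shadow tables, report the
 * entry's protection bits, and stop descending at leaf (huge) entries.
 */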

static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	p4d_t val = READ_ONCE(*p4d);

#if CONFIG_PGTABLE_LEVELS > 3 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 1, p4d_val(val));

	if (p4d_leaf(val)) {
		st->note_page(st, addr, 1, p4d_val(val));
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pud_t val = READ_ONCE(*pud);

#if CONFIG_PGTABLE_LEVELS > 2 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 2, pud_val(val));

	if (pud_leaf(val)) {
		st->note_page(st, addr, 2, pud_val(val));
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pmd_t val = READ_ONCE(*pmd);

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 3, pmd_val(val));

	if (pmd_leaf(val)) {
		st->note_page(st, addr, 3, pmd_val(val));
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}
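
/*
 * PTEs are fetched with ptep_get_lockless() because the walk does not
 * take the PTE-level page table locks; the helper copes with
 * architectures where a PTE cannot be read in one atomic access.
 */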
static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pte_t val = ptep_get_lockless(pte);

	if (st->effective_prot)
		st->effective_prot(st, 4, pte_val(val));

	st->note_page(st, addr, 4, pte_val(val));

	return 0;
}
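
/*
 * Called for unpopulated ranges; reporting a zero entry at @depth lets
 * the note_page() implementation close off whatever contiguous region
 * it was tracking.
 */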
static int ptdump_hole(unsigned long addr, unsigned long next,
		       int depth, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;

	st->note_page(st, addr, depth, 0);

	return 0;
}
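
/*
 * Entry callbacks are installed at every level so that leaf (huge)
 * mappings and the KASAN early shadow tables can be reported without
 * descending any further.
 */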
static const struct mm_walk_ops ptdump_ops = {
	.pgd_entry	= ptdump_pgd_entry,
	.p4d_entry	= ptdump_p4d_entry,
	.pud_entry	= ptdump_pud_entry,
	.pmd_entry	= ptdump_pmd_entry,
	.pte_entry	= ptdump_pte_entry,
	.pte_hole	= ptdump_hole,
};
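
/*
 * ptdump_walk_pgd - walk a pgd and report every populated entry
 * @st:  walk state; @st->note_page() is the caller's output hook and
 *	 @st->range is an array of address ranges terminated by an
 *	 entry with start == end
 * @mm:  the mm whose page tables are walked (typically &init_mm)
 * @pgd: pgd to walk, or NULL to use @mm's pgd
 *
 * The mmap lock is held in write mode so the page tables cannot change
 * under the walk. A hypothetical arch-side caller (the my_* names are
 * illustrative only, not part of this API) might look like:
 *
 *	static void my_note_page(struct ptdump_state *st,
 *				 unsigned long addr, int level, u64 val)
 *	{
 *		...decode and print the entry...
 *	}
 *
 *	static const struct ptdump_range my_ranges[] = {
 *		{PAGE_OFFSET, ~0UL},
 *		{0, 0}
 *	};
 *
 *	struct ptdump_state st = {
 *		.note_page	= my_note_page,
 *		.range		= my_ranges,
 *	};
 *
 *	ptdump_walk_pgd(&st, &init_mm, NULL);
 */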
void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
{
	const struct ptdump_range *range = st->range;

	mmap_write_lock(mm);
	while (range->start != range->end) {
		walk_page_range_novma(mm, range->start, range->end,
				      &ptdump_ops, pgd, st);
		range++;
	}
	mmap_write_unlock(mm);

	/* Flush out the last page */
	st->note_page(st, 0, -1, 0);
}
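
/*
 * Reading /sys/kernel/debug/check_wx_pages triggers the architecture's
 * ptdump_check_wx() and reports SUCCESS when no W+X mappings are left,
 * FAILED otherwise.
 */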
static int check_wx_show(struct seq_file *m, void *v)
{
	if (ptdump_check_wx())
		seq_puts(m, "SUCCESS\n");
	else
		seq_puts(m, "FAILED\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(check_wx);

static int ptdump_debugfs_init(void)
{
	debugfs_create_file("check_wx_pages", 0400, NULL, NULL, &check_wx_fops);

	return 0;
}

device_initcall(ptdump_debugfs_init);