// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

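/*
 * Per-page state stored in the page_ext area: how many times the page is
 * currently mapped into user page tables as anonymous vs. file-backed memory.
 */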
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

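/*
 * Checking is off by default: the static key stays true until
 * init_page_table_check() disables it, which happens only when checking was
 * requested at boot or enforced via CONFIG_PAGE_TABLE_CHECK_ENFORCED.
 */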
DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

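/* Parse the "page_table_check=" boot parameter (e.g. page_table_check=on). */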
static int __init early_page_table_check_param(char *buf)
{
	return kstrtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);

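/*
 * page_ext callbacks: ->need tells the page_ext core whether to reserve space
 * for our counters, ->init flips the static key once that space exists.
 */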
static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

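/* Registered with the page_ext framework; .size reserves room for the counters. */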
struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
	.need_shared_flags = false,
};

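/* Return the page_table_check counters stored in this page's page_ext data. */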
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return page_ext_data(page_ext, &page_table_check_ops);
}

/*
 * An entry is removed from the page table. Decrement the counters for that
 * page, verify that it is of the correct type, and that the counters do not
 * become negative.
 */
static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

/*
 * A new entry is added to the page table. Increment the counters for that
 * page, verify that it is of the correct type, and that the page is not being
 * mapped with a different type by a different process.
 */
static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

/*
 * The page is on the free list, or is being allocated. Verify that the
 * counters are zero; crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext;
	unsigned long i;

	BUG_ON(PageSlab(page));

	page_ext = page_ext_get(page);
	BUG_ON(!page_ext);
	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

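/*
 * A user-accessible PTE is being cleared: drop the map counts for the single
 * page it covered. Kernel (init_mm) mappings are never tracked. Typically
 * reached from the architecture's PTE clearing helpers when page table check
 * is enabled; the PMD/PUD variants below do the same for huge leaf entries.
 */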
void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(pte_pfn(pte), PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

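/*
 * A contiguous batch of @nr PTEs is being installed: retire the old entries
 * first, then account the new mapping if it is user-accessible. The rw flag
 * passed down is pte_write(), so mapping a writable anonymous page more than
 * once triggers a BUG. Typically called from the architecture's set_ptes().
 */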
void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
		unsigned int nr)
{
	unsigned int i;

	if (&init_mm == mm)
		return;

	for (i = 0; i < nr; i++)
		__page_table_check_pte_clear(mm, ptep_get(ptep + i));
	if (pte_user_accessible_page(pte))
		page_table_check_set(pte_pfn(pte), nr, pte_write(pte));
}
EXPORT_SYMBOL(__page_table_check_ptes_set);

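/*
 * Same accounting for a huge (PMD-sized) user mapping: the entry being
 * replaced is released, then every small page under the new leaf entry is
 * counted.
 */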
void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pmd_clear(mm, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

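/* As above, for PUD-sized leaf mappings. */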
void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

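/*
 * An entire PTE page is about to be removed (e.g. when the range is collapsed
 * into a huge page): walk all PTRS_PER_PTE entries and drop their map counts.
 */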
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		if (WARN_ON(!ptep))
			return;
		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, ptep_get(ptep));
			addr += PAGE_SIZE;
			ptep++;
		}
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}