// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/cpu.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "internal.h"

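/*
 * A single IDA serves both translation modes: hash context ids and radix
 * PIDs are drawn from (and returned to) the same allocator.
 */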
static DEFINE_IDA(mmu_context_ida);

static int alloc_context_id(int min_id, int max_id)
{
	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
}

#ifdef CONFIG_PPC_64S_HASH_MMU
void __init hash__reserve_context_id(int id)
{
	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

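/*
 * The usable context id range depends on how many virtual address bits
 * the hash MMU supports: CPUs without MMU_FTR_68_BIT_VA (65-bit VA) get
 * a smaller context space.
 */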
int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);

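/*
 * A hash context id covers one 512TB slice of the user address space;
 * extended_id[i] holds the context for the i-th slice, and ctx->id aliases
 * extended_id[0] (they share a union in mm_context_t), which is why the
 * comment below calls it "id 0". Illustrative example: an mm that has only
 * used the first and third slices would have
 * extended_id = { id, 0, id2, 0, ... }.
 */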
static int realloc_context_ids(mm_context_t *ctx)
{
	int i, id;

	/*
	 * id 0 (aka. ctx->id) is special, we always allocate a new one, even if
	 * there wasn't one allocated previously (which happens in the exec
	 * case where ctx is newly allocated).
	 *
	 * We have to be a bit careful here. We must keep the existing ids in
	 * the array, so that we can test if they're non-zero to decide if we
	 * need to allocate a new one. However in case of error we must free the
	 * ids we've allocated but *not* any of the existing ones (or risk a
	 * UAF). That's why we decrement i at the start of the error handling
	 * loop, to skip the id that we just tested but couldn't reallocate.
	 */
	for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
		if (i == 0 || ctx->extended_id[i]) {
			id = hash__alloc_context_id();
			if (id < 0)
				goto error;

			ctx->extended_id[i] = id;
		}
	}

	/* The caller expects us to return id */
	return ctx->id;

error:
	for (i--; i >= 0; i--) {
		if (ctx->extended_id[i])
			ida_free(&mmu_context_ida, ctx->extended_id[i]);
	}

	return id;
}

static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
					   GFP_KERNEL);
	if (!mm->context.hash_context)
		return -ENOMEM;

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice details inherited via
	 * fork (which will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0) {
		memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
		slice_init_new_context_exec(mm);
	} else {
		/* This is fork. Copy hash_context details from current->mm */
		memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
		/* Inherit subpage prot details if we have them. */
		if (current->mm->context.hash_context->spt) {
			mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
								GFP_KERNEL);
			if (!mm->context.hash_context->spt) {
				kfree(mm->context.hash_context);
				return -ENOMEM;
			}
		}
#endif
	}

	index = realloc_context_ids(&mm->context);
	if (index < 0) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		kfree(mm->context.hash_context->spt);
#endif
		kfree(mm->context.hash_context);
		return index;
	}

	pkey_mm_init(mm);
	return index;
}

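/*
 * Called on exec: reset the slice state for the new image and let the SLB
 * code set up/preload entries for it.
 */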
void hash__setup_new_exec(void)
{
	slice_setup_new_exec();

	slb_setup_new_exec();
}
#else
static inline int hash__init_new_context(struct mm_struct *mm)
{
	BUILD_BUG();
	return 0;
}
#endif

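/*
 * On radix the context id is the hardware PID: it indexes the process
 * table and is written to SPRN_PID on context switch. Allocation starts
 * at mmu_base_pid so that reserved low ids (PID 0 is the kernel's) are
 * never handed to user mms.
 */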
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set up the process table entry: the RTS field (radix tree size),
	 * the physical address of the PGD, and the root index size.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with subsequent update of the PID
	 * register (at which point HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

#ifdef CONFIG_PPC_64S_HASH_MMU
	mm->context.hash_context = NULL;
#endif

	return index;
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

	mm->context.pte_frag = NULL;
	mm->context.pmd_frag = NULL;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	atomic_set(&mm->context.active_cpus, 0);
	atomic_set(&mm->context.copros, 0);

	return 0;
}

void __destroy_context(int context_id)
{
	ida_free(&mmu_context_ida, context_id);
}
EXPORT_SYMBOL_GPL(__destroy_context);

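/*
 * A radix mm only ever holds the single PID in ctx->id; a hash mm may hold
 * one context id per 512TB slice in extended_id[], all of which must be
 * returned to the IDA.
 */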
static void destroy_contexts(mm_context_t *ctx)
{
	if (radix_enabled()) {
		ida_free(&mmu_context_ida, ctx->id);
	} else {
#ifdef CONFIG_PPC_64S_HASH_MMU
		int index, context_id;

		for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
			context_id = ctx->extended_id[index];
			if (context_id)
				ida_free(&mmu_context_ida, context_id);
		}
		kfree(ctx->hash_context);
#else
		BUILD_BUG(); // radix_enabled() should be constant true
#endif
	}
}

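/*
 * mm->context.pmd_frag points at the next never-handed-out fragment of a
 * partially consumed PMD page. The accounting below assumes every fragment
 * in the page, handed out or not, holds a reference on pt_frag_refcount;
 * dropping the references for the never-handed-out tail frees the page
 * once all in-use fragments are gone.
 */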
static void pmd_frag_destroy(void *pmd_frag)
{
	int count;
	struct ptdesc *ptdesc;

	ptdesc = virt_to_ptdesc(pmd_frag);
	/* drop all the pending references */
	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
	/* We allow PMD_FRAG_NR fragments from a PMD page */
	if (atomic_sub_and_test(PMD_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
		pagetable_pmd_dtor(ptdesc);
		pagetable_free(ptdesc);
	}
}

static void destroy_pagetable_cache(struct mm_struct *mm)
{
	void *frag;

	frag = mm->context.pte_frag;
	if (frag)
		pte_frag_destroy(frag);

	frag = mm->context.pmd_frag;
	if (frag)
		pmd_frag_destroy(frag);
}

void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	/*
	 * For tasks which were successfully initialized we end up calling
	 * arch_exit_mmap(), which clears the process table entry. And
	 * arch_exit_mmap() is called before the required fullmm TLB flush,
	 * which does a RIC=2 flush. Hence for an initialized task, the cached
	 * process table entries have already been cleared by this point.
	 *
	 * The condition below handles the error case during task init. We have
	 * set the process table entry early, and if task initialization fails
	 * we need to ensure the process table entry is zeroed. We need not
	 * worry about process table entry caches because the task never ran
	 * with this PID value.
	 */
	if (radix_enabled())
		process_tb[mm->context.id].prtb0 = 0;
	else
		subpage_prot_free(mm);
	destroy_contexts(&mm->context);
	mm->context.id = MMU_NO_CONTEXT;
}

void arch_exit_mmap(struct mm_struct *mm)
{
	destroy_pagetable_cache(mm);

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However we know that at least the P9
		 * implementation will avoid caching an entry with an
		 * invalid RTS field, and 0 is invalid. So this will do.
		 *
		 * This runs before the "fullmm" tlb flush in exit_mmap,
		 * which does a RIC=2 tlbie to clear the process table
		 * entry. See the "fullmm" comments in tlb-radix.c.
		 *
		 * No barrier required here after the store because
		 * this process will do the invalidate, which starts with
		 * ptesync.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	}
}

#ifdef CONFIG_PPC_RADIX_MMU
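/*
 * On radix a context switch only has to retarget the PID register at the
 * next mm's process table entry; the isync ensures no subsequent
 * instruction executes until the new PID is in effect.
 */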
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	mtspr(SPRN_PID, next->context.id);
	isync();
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
/**
 * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined)
 *
 * This clears the CPU from mm_cpumask for all processes, and then flushes the
 * local TLB to ensure TLB coherency in case the CPU is onlined again.
 *
 * KVM guest translations are not necessarily flushed here. If KVM started
 * using mm_cpumask or the Linux APIs which do, this would have to be resolved.
 */
void cleanup_cpu_mmu_context(void)
{
	int cpu = smp_processor_id();

	clear_tasks_mm_cpumask(cpu);
	tlbiel_all();
}
#endif