/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_MMU_CONTEXT_H
#define __PARISC_MMU_CONTEXT_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <asm-generic/mm_hooks.h>

/* on PA-RISC, we actually have enough contexts to justify an allocator
 * for them.  prumpf */

extern unsigned long alloc_sid(void);
extern void free_sid(unsigned long);

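/*
 * alloc_sid() hands out an unused space ID from the global pool and
 * free_sid() returns one to it.  Rough sketch of the lifecycle, going
 * by the hooks defined below:
 *
 *	mm creation (fork/exec)	-> init_new_context() -> alloc_sid()
 *	mm teardown		-> destroy_context()  -> free_sid()
 */
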
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	/* a freshly created mm has exactly one user */
	BUG_ON(atomic_read(&mm->mm_users) != 1);

	mm->context.space_id = alloc_sid();
	return 0;
}

#define destroy_context destroy_context
static inline void
destroy_context(struct mm_struct *mm)
{
	free_sid(mm->context.space_id);
	mm->context.space_id = 0;	/* 0 means "no space ID", see activate_mm() */
}

static inline unsigned long __space_to_prot(mm_context_t context)
{
#if SPACEID_SHIFT == 0
	return context.space_id << 1;
#else
	return context.space_id >> (SPACEID_SHIFT - 1);
#endif
}
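
/*
 * Worked example with made-up numbers: with SPACEID_SHIFT == 0, a
 * space ID of 0x101 becomes protection ID 0x202 - the ID is simply
 * moved up one bit.  With SPACEID_SHIFT == n > 0, space IDs are
 * already multiples of 1 << n, so shifting right by (n - 1) lands the
 * ID in the same position.  Either way bit 0 of the result stays
 * clear; in the protection ID register that bit is the write-disable
 * flag.
 */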

static inline void load_context(mm_context_t context)
{
	/* space ID into the user space register, protection ID into %cr8 */
	mtsp(context.space_id, SR_USER);
	mtctl(__space_to_prot(context), 8);
}
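
/*
 * Sketch of the state after load_context() for an mm that was handed
 * space ID 0x1234 (made-up value):
 *
 *	SR_USER (user space register)	= 0x1234
 *	%cr8 (PID1, protection ID)	= __space_to_prot(context)
 *
 * The space ID selects the virtual address space; the protection ID
 * is what the hardware compares against a page's access ID.
 */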

static inline void switch_mm_irqs_off(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	if (prev != next) {
#ifdef CONFIG_TLB_PTLOCK
		/*
		 * Put the physical address of the page_table_lock in
		 * cr28 (tr4) for the TLB fault handlers.
		 */
		spinlock_t *pgd_lock = &next->page_table_lock;
		mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28);
#endif
		/* physical address of the new page directory into cr25 */
		mtctl(__pa(next->pgd), 25);
		load_context(next->context);
	}
}
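
/*
 * With CONFIG_TLB_PTLOCK the TLB fault handlers take the page table
 * lock themselves, hence the physical lock address in cr28 above.
 * __ldcw_align() is needed because the ldcw instruction used for the
 * lock requires a 16-byte aligned operand, which may not be the start
 * of the raw_lock structure.
 */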

static inline void switch_mm(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	unsigned long flags;

	if (prev == next)
		return;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off
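
/*
 * The scheduler invokes switch_mm_irqs_off() directly with interrupts
 * already disabled; switch_mm() is for any other caller and masks
 * interrupts itself, presumably so a fault taken from an interrupt
 * handler can never see a half-switched context (pgd in cr25 already
 * updated, space ID not yet loaded).
 */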

#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * Activate_mm is our one chance to allocate a space id
	 * for a new mm created in the exec path. There's also
	 * some lazy tlb stuff, which is currently dead code, but
	 * we only allocate a space id if one hasn't been allocated
	 * already, so we should be OK.
	 */

	BUG_ON(next == &init_mm); /* Should never happen */

	if (next->context.space_id == 0)
		next->context.space_id = alloc_sid();

	switch_mm(prev, next, current);
}
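
/*
 * Rough sketch of the exec path the comment above refers to:
 *
 *	exec_mmap() -> activate_mm() -> switch_mm()
 *
 * The space_id == 0 check makes the allocation idempotent, so an mm
 * that already got its ID from init_new_context() keeps it.
 */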

#include <asm-generic/mmu_context.h>

#endif