1#ifndef __M68K_MMU_CONTEXT_H
2#define __M68K_MMU_CONTEXT_H
3
4#include <linux/config.h>
5
/*
 * Scheduler hook for switching to a kernel thread that keeps running on
 * the previous task's address space ("lazy TLB").  No per-CPU MMU state
 * needs updating on m68k, so this is a no-op.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
9
10#ifndef CONFIG_SUN3
11
12#include <asm/setup.h>
13#include <asm/page.h>
14#include <asm/pgalloc.h>
15
16extern inline int
17init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18{
19	mm->context = virt_to_phys(mm->pgd);
20	return 0;
21}
22
/* Nothing to tear down on non-Sun3: the context is just a derived value,
 * no hardware context slot was allocated. */
#define destroy_context(mm)		do { } while(0)
24
25extern inline void switch_mm_0230(struct mm_struct *mm)
26{
27	unsigned long crp[2] = {
28		0x80000000 | _PAGE_TABLE, mm->context
29	};
30	unsigned long tmp;
31
32	asm volatile (".chip 68030");
33
34	/* flush MC68030/MC68020 caches (they are virtually addressed) */
35	asm volatile (
36		"movec %%cacr,%0;"
37		"orw %1,%0; "
38		"movec %0,%%cacr"
39		: "=d" (tmp) : "di" (FLUSH_I_AND_D));
40
41	/* Switch the root pointer. For a 030-only kernel,
42	 * avoid flushing the whole ATC, we only need to
43	 * flush the user entries. The 68851 does this by
44	 * itself. Avoid a runtime check here.
45	 */
46	asm volatile (
47#ifdef CPU_M68030_ONLY
48		"pmovefd %0,%%crp; "
49		"pflush #0,#4"
50#else
51		"pmove %0,%%crp"
52#endif
53		: : "m" (crp[0]));
54
55	asm volatile (".chip 68k");
56}
57
58extern inline void switch_mm_0460(struct mm_struct *mm)
59{
60	asm volatile (".chip 68040");
61
62	/* flush address translation cache (user entries) */
63	asm volatile ("pflushan");
64
65	/* switch the root pointer */
66	asm volatile ("movec %0,%%urp" : : "r" (mm->context));
67
68	if (CPU_IS_060) {
69		unsigned long tmp;
70
71		/* clear user entries in the branch cache */
72		asm volatile (
73			"movec %%cacr,%0; "
74		        "orl %1,%0; "
75		        "movec %0,%%cacr"
76			: "=d" (tmp): "di" (0x00200000));
77	}
78
79	asm volatile (".chip 68k");
80}
81
82static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
83{
84	if (prev != next) {
85		if (CPU_IS_020_OR_030)
86			switch_mm_0230(next);
87		else
88			switch_mm_0460(next);
89	}
90}
91
92extern inline void activate_mm(struct mm_struct *prev_mm,
93			       struct mm_struct *next_mm)
94{
95	next_mm->context = virt_to_phys(next_mm->pgd);
96
97	if (CPU_IS_020_OR_030)
98		switch_mm_0230(next_mm);
99	else
100		switch_mm_0460(next_mm);
101}
102
103#else  /* CONFIG_SUN3 */
104#include <asm/sun3mmu.h>
105#include <linux/sched.h>
106
107extern unsigned long get_free_context(struct mm_struct *mm);
108extern void clear_context(unsigned long context);
109
/* Set the context for a new task to "unmapped"; a real hardware context
 * slot is allocated lazily by get_mmu_context() on first activation.
 * Always succeeds (returns 0). */
static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = SUN3_INVALID_CONTEXT;
	return 0;
}
116
117/* find the context given to this process, and if it hasn't already
118   got one, go get one for it. */
119static inline void get_mmu_context(struct mm_struct *mm)
120{
121	if(mm->context == SUN3_INVALID_CONTEXT)
122		mm->context = get_free_context(mm);
123}
124
125/* flush context if allocated... */
126static inline void destroy_context(struct mm_struct *mm)
127{
128	if(mm->context != SUN3_INVALID_CONTEXT)
129		clear_context(mm->context);
130}
131
/* Make `mm's context current: ensure a hardware context is allocated
 * (lazy allocation), then hand it to the MMU via sun3_put_context().
 * Order matters — the context must be valid before it is programmed. */
static inline void activate_context(struct mm_struct *mm)
{
	get_mmu_context(mm);
	sun3_put_context(mm->context);
}
137
/*
 * Switch to `next's address space.  Use the `next' parameter directly —
 * as the non-Sun3 variant of switch_mm() does — rather than tsk->mm:
 * the two are normally the same mm, but tsk->mm may be NULL for a kernel
 * thread running on a borrowed mm, while `next' is always the mm being
 * switched to.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
{
	activate_context(next);
}
142
/*
 * Make `next_mm' the active address space: allocate a hardware context
 * if needed and program it into the Sun-3 MMU.  prev_mm is unused.
 *
 * static inline rather than extern inline: no out-of-line definition
 * exists, so static inline avoids unresolved-symbol risk and matches the
 * other static inline functions in this header.
 */
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	activate_context(next_mm);
}
148
149#endif
150#endif
151