#ifndef __M68K_MMU_CONTEXT_H
#define __M68K_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>

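/* m68k has nothing to do when a CPU enters lazy TLB mode. */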
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#ifndef CONFIG_SUN3

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>

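/*
 * The mm context on these MMUs is simply the physical address of the
 * page directory, which is loaded into the MMU root pointer whenever
 * the mm is switched to.
 */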
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context = virt_to_phys(mm->pgd);
	return 0;
}

#define destroy_context(mm)		do { } while (0)

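/*
 * 68020/68030: flush the virtually addressed caches and reload the
 * CPU root pointer (CRP) with the new page directory.
 */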
static inline void switch_mm_0230(struct mm_struct *mm)
{
	unsigned long crp[2] = {
		0x80000000 | _PAGE_TABLE, mm->context
	};
	unsigned long tmp;

	asm volatile (".chip 68030");

	/* flush MC68030/MC68020 caches (they are virtually addressed) */
	asm volatile (
		"movec %%cacr,%0;"
		"orw %1,%0; "
		"movec %0,%%cacr"
		: "=d" (tmp) : "di" (FLUSH_I_AND_D));

	/* Switch the root pointer. For a 030-only kernel,
	 * avoid flushing the whole ATC; we only need to
	 * flush the user entries. The 68851 does this by
	 * itself. Avoid a runtime check here.
	 */
	asm volatile (
#ifdef CPU_M68030_ONLY
		"pmovefd %0,%%crp; "
		"pflush #0,#4"
#else
		"pmove %0,%%crp"
#endif
		: : "m" (crp[0]));

	asm volatile (".chip 68k");
}

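/*
 * 68040/68060: flush the user ATC entries and reload the user root
 * pointer (URP).  Unlike the virtually addressed 020/030 caches, no
 * cache flush is needed here; the 68060 only needs its branch cache
 * cleared of user entries.
 */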
static inline void switch_mm_0460(struct mm_struct *mm)
{
	asm volatile (".chip 68040");

	/* flush address translation cache (user entries) */
	asm volatile ("pflushan");

	/* switch the root pointer */
	asm volatile ("movec %0,%%urp" : : "r" (mm->context));

	if (CPU_IS_060) {
		unsigned long tmp;

		/* clear user entries in the branch cache */
		asm volatile (
			"movec %%cacr,%0; "
			"orl %1,%0; "
			"movec %0,%%cacr"
			: "=d" (tmp) : "di" (0x00200000));
	}

	asm volatile (".chip 68k");
}

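/* Reload the root pointer only when actually changing address spaces. */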
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
	if (prev != next) {
		if (CPU_IS_020_OR_030)
			switch_mm_0230(next);
		else
			switch_mm_0460(next);
	}
}

#define deactivate_mm(tsk,mm)	do { } while (0)

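/*
 * Called when an mm becomes the active user address space (e.g. at exec
 * time): (re)derive the context from the page directory and load the
 * root pointer.
 */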
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	next_mm->context = virt_to_phys(next_mm->pgd);

	if (CPU_IS_020_OR_030)
		switch_mm_0230(next_mm);
	else
		switch_mm_0460(next_mm);
}

#else  /* CONFIG_SUN3 */
#include <asm/sun3mmu.h>
#include <linux/sched.h>

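/* Context allocation/release, provided by the Sun-3 MMU support code. */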
extern unsigned long get_free_context(struct mm_struct *mm);
extern void clear_context(unsigned long context);

/* set the context for a new task to unmapped */
static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = SUN3_INVALID_CONTEXT;
	return 0;
}

/* find the context given to this process, and if it hasn't already
   got one, go get one for it. */
static inline void get_mmu_context(struct mm_struct *mm)
{
	if (mm->context == SUN3_INVALID_CONTEXT)
		mm->context = get_free_context(mm);
}

/* flush context if allocated... */
static inline void destroy_context(struct mm_struct *mm)
{
	if (mm->context != SUN3_INVALID_CONTEXT)
		clear_context(mm->context);
}

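/* Make sure this mm has a hardware context and make it the current one. */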
static inline void activate_context(struct mm_struct *mm)
{
	get_mmu_context(mm);
	sun3_put_context(mm->context);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
	activate_context(tsk->mm);
}

#define deactivate_mm(tsk,mm)	do { } while (0)

static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	activate_context(next_mm);
}

#endif /* CONFIG_SUN3 */
#endif /* __M68K_MMU_CONTEXT_H */