/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
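/*
 * Each TLB entry maps an even/odd pair of pages, so VPN2 advances in steps
 * of 2 * PAGE_SIZE.  Spacing the dummy entries by (PAGE_SHIFT + 1) inside
 * unmapped CKSEG0 gives every index its own VPN2; with 4kB pages, for
 * example, entry 3 ends up at CKSEG0 + 0x6000.  Overlapping entries are the
 * "revenge" above: MIPS32/MIPS64 cores raise a machine check when a lookup
 * matches more than one entry.
 */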

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
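/*
 * ENTER_CRITICAL/EXIT_CRITICAL bracket every sequence of TLB register
 * accesses below.  Interrupts are disabled on this CPU; on SMTC the
 * dvpe()/evpe() pair additionally suspends the other VPEs on the core,
 * which share the same TLB and CP0 state.  The opening brace in the SMTC
 * variant is closed by EXIT_CRITICAL, so the two macros must always be
 * used as a matched pair within one function.
 */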

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
}

/* All entries common to a mm share an ASID.  To effectively flush
   these entries, we just bump the ASID. */
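/*
 * drop_mmu_context() assigns the mm a fresh ASID (and reloads it into
 * EntryHi if the mm is currently active), so existing TLB entries tagged
 * with the old ASID can never match again and are simply recycled by the
 * refill handler.
 */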
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		ENTER_CRITICAL(flags);
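		/*
		 * Each TLB entry covers an even/odd pair of pages, so convert
		 * the byte range into the number of entries it can occupy.
		 * Only probe individual entries if that is at most half the
		 * TLB; otherwise dropping the whole context is cheaper.
		 */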
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize / 2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		EXIT_CRITICAL(flags);
	}
}

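/*
 * Kernel mappings are entered with the global bit set, so no ASID is ORed
 * into EntryHi here; probing the bare (even) virtual address is enough.
 */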
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	EXIT_CRITICAL(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
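		/*
		 * A negative index means the probe missed: this page has no
		 * TLB entry on this CPU, so just restore EntryHi and leave.
		 */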
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		EXIT_CRITICAL(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);

	EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needed workaround.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle the debugger faulting in pages for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);

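	/*
	 * Load the even/odd page of the pair into EntryLo0/EntryLo1.  With
	 * 64-bit physical addresses on MIPS32 R1 the PFN and attribute bits
	 * live in pte_high; otherwise the software PTE is shifted down so
	 * that its PFN and C/D/V/G bits land in the EntryLo layout (the
	 * exact shift follows the PTE bit layout in pgtable-bits.h).
	 */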
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
	write_c0_entrylo0(ptep->pte_high);
	ptep++;
	write_c0_entrylo1(ptep->pte_high);
#else
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	EXIT_CRITICAL(flags);
}

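/*
 * Wired entries occupy the low indices of the TLB: tlbwr never selects an
 * index below c0_wired, and local_flush_tlb_all() above starts blasting at
 * read_c0_wired(), so a wired mapping survives both random replacement and
 * full flushes for the lifetime of the system.
 */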
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */
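/*
 * A purely illustrative use from early platform setup, mapping a 32MB I/O
 * window at 0xe0000000 with a pair of 16MB pages.  The addresses and the
 * ENTRYLO() helper (packing the PFN and C/D/V/G bits) are hypothetical:
 *
 *	add_temporary_entry(ENTRYLO(0x1c000000), ENTRYLO(0x1d000000),
 *			    0xe0000000, PM_16M);
 *
 * Because the entry is not wired, it disappears on the next full TLB flush.
 */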

static int temp_tlb_entry __initdata;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	EXIT_CRITICAL(flags);
	return ret;
}

static void __init probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32 / MIPS64 compliant CPU, the Config 1
	 * register is not supported and we assume an R4k-style TLB.
	 * CPU probing has already figured out the number of TLB entries.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * If the TLB is shared in an SMTC system, the total size has
	 * already been calculated and written into cpu_data's tlbsize.
	 */
	if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
		return;
#endif /* CONFIG_MIPS_MT_SMTC */

	reg = read_c0_config1();
	if (!((config >> 7) & 3))
		panic("No TLB present");

	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}

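/*
 * Handling of the "ntlb=" boot argument: a value between 2 and tlbsize
 * makes tlb_init() below mark the lower (tlbsize - ntlb) entries wired, so
 * that only ntlb entries take part in random replacement.  Handy for
 * debugging TLB replacement problems.
 */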
static int __initdata ntlb = 0;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

void __init tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set for 4kB pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	write_c0_framemask(0);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.  */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}