/*-
 * Copyright (c) 2004-2010 Juli Mallett <jmallett@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/pte.h>
#include <machine/tlb.h>

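/*
 * Upper bound on the number of TLB entries that tlb_save() will snapshot
 * per CPU, sized for the largest TLB among the supported CPU families.
 */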
#if defined(CPU_CNMIPS)
#define	MIPS_MAX_TLB_ENTRIES	128
#elif defined(CPU_NLM)
#define	MIPS_MAX_TLB_ENTRIES	(2048 + 128)
#else
#define	MIPS_MAX_TLB_ENTRIES	64
#endif

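/*
 * Per-CPU copy of the TLB contents, filled in by tlb_save() and read by
 * the DDB "show tlb" command below.
 */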
struct tlb_state {
	unsigned wired;
	struct tlb_entry {
		register_t entryhi;
		register_t entrylo0;
		register_t entrylo1;
		register_t pagemask;
	} entry[MIPS_MAX_TLB_ENTRIES];
};

static struct tlb_state tlb_state[MAXCPU];

#if 0
/*
 * PageMask must increment in steps of 2 bits.
 */
COMPILE_TIME_ASSERT(POPCNT(TLBMASK_MASK) % 2 == 0);
#endif

static inline void
tlb_probe(void)
{
	__asm __volatile ("tlbp" : : : "memory");
	mips_cp0_sync();
}

static inline void
tlb_read(void)
{
	__asm __volatile ("tlbr" : : : "memory");
	mips_cp0_sync();
}

static inline void
tlb_write_indexed(void)
{
	__asm __volatile ("tlbwi" : : : "memory");
	mips_cp0_sync();
}

static inline void
tlb_write_random(void)
{
	__asm __volatile ("tlbwr" : : : "memory");
	mips_cp0_sync();
}

static void tlb_invalidate_one(unsigned);

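/*
 * Write the TLB entry at index "i" so that it maps the even/odd page pair
 * containing "va"; "pte0" and "pte1" supply the EntryLo0 and EntryLo1
 * values for the even and odd pages, respectively.
 */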
void
tlb_insert_wired(unsigned i, vm_offset_t va, pt_entry_t pte0, pt_entry_t pte1)
{
	register_t asid;
	register_t s;

	va &= ~PAGE_MASK;

	s = intr_disable();
	asid = mips_rd_entryhi() & TLBHI_ASID_MASK;

	mips_wr_index(i);
	mips_wr_pagemask(0);
	mips_wr_entryhi(TLBHI_ENTRY(va, 0));
	mips_wr_entrylo0(pte0);
	mips_wr_entrylo1(pte1);
	tlb_write_indexed();

	mips_wr_entryhi(asid);
	intr_restore(s);
}

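/*
 * Invalidate any TLB entry that maps "va" under the pmap's ASID: probe for
 * a matching entry and clear the indexed slot if the probe hits.
 */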
void
tlb_invalidate_address(struct pmap *pmap, vm_offset_t va)
{
	register_t asid;
	register_t s;
	int i;

	va &= ~PAGE_MASK;

	s = intr_disable();
	asid = mips_rd_entryhi() & TLBHI_ASID_MASK;

	mips_wr_pagemask(0);
	mips_wr_entryhi(TLBHI_ENTRY(va, pmap_asid(pmap)));
	tlb_probe();
	i = mips_rd_index();
	if (i >= 0)
		tlb_invalidate_one(i);

	mips_wr_entryhi(asid);
	intr_restore(s);
}

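/*
 * Invalidate every non-wired TLB entry.
 */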
void
tlb_invalidate_all(void)
{
	register_t asid;
	register_t s;
	unsigned i;

	s = intr_disable();
	asid = mips_rd_entryhi() & TLBHI_ASID_MASK;

	for (i = mips_rd_wired(); i < num_tlbentries; i++)
		tlb_invalidate_one(i);

	mips_wr_entryhi(asid);
	intr_restore(s);
}

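/*
 * Invalidate non-wired TLB entries belonging to user address spaces: every
 * entry with a non-zero ASID if "pmap" is NULL, otherwise only the entries
 * tagged with the given pmap's ASID.
 */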
void
tlb_invalidate_all_user(struct pmap *pmap)
{
	register_t asid;
	register_t s;
	unsigned i;

	s = intr_disable();
	asid = mips_rd_entryhi() & TLBHI_ASID_MASK;

	for (i = mips_rd_wired(); i < num_tlbentries; i++) {
		register_t uasid;

		mips_wr_index(i);
		tlb_read();

		uasid = mips_rd_entryhi() & TLBHI_ASID_MASK;
		if (pmap == NULL) {
			/*
			 * Invalidate all non-kernel entries.
			 */
			if (uasid == 0)
				continue;
		} else {
			/*
			 * Invalidate this pmap's entries.
			 */
			if (uasid != pmap_asid(pmap))
				continue;
		}
		tlb_invalidate_one(i);
	}

	mips_wr_entryhi(asid);
	intr_restore(s);
}

/*
 * Invalidates any TLB entries that map a virtual page from the specified
 * address range.  If "end" is zero, then every virtual page is considered to
 * be within the address range's upper bound.
 */
void
tlb_invalidate_range(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{
	register_t asid, end_hi, hi, hi_pagemask, s, save_asid, start_hi;
	int i;

	KASSERT(start < end || (end == 0 && start > 0),
	    ("tlb_invalidate_range: invalid range"));

	/*
	 * Truncate the virtual address "start" to an even page frame number,
	 * and round up the virtual address "end" to an even page frame number.
	 */
	start &= ~((1 << TLBMASK_SHIFT) - 1);
	end = (end + (1 << TLBMASK_SHIFT) - 1) & ~((1 << TLBMASK_SHIFT) - 1);

	s = intr_disable();
	save_asid = mips_rd_entryhi() & TLBHI_ASID_MASK;

	asid = pmap_asid(pmap);
	start_hi = TLBHI_ENTRY(start, asid);
	end_hi = TLBHI_ENTRY(end, asid);

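	/*
	 * Each TLB entry maps an even/odd pair of virtual pages, so the
	 * range only has to be probed once per (1 << TLBMASK_SHIFT) bytes.
	 */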
	/*
	 * Select the fastest method for invalidating the TLB entries.
	 */
	if (end - start < num_tlbentries << TLBMASK_SHIFT || (end == 0 &&
	    start >= -(num_tlbentries << TLBMASK_SHIFT))) {
		/*
		 * The virtual address range is small compared to the size of
		 * the TLB.  Probe the TLB for each even numbered page frame
		 * within the virtual address range.
		 */
		for (hi = start_hi; hi != end_hi; hi += 1 << TLBMASK_SHIFT) {
			mips_wr_pagemask(0);
			mips_wr_entryhi(hi);
			tlb_probe();
			i = mips_rd_index();
			if (i >= 0)
				tlb_invalidate_one(i);
		}
	} else {
		/*
		 * The virtual address range is large compared to the size of
		 * the TLB.  Test every non-wired TLB entry.
		 */
		for (i = mips_rd_wired(); i < num_tlbentries; i++) {
			mips_wr_index(i);
			tlb_read();
			hi = mips_rd_entryhi();
			if ((hi & TLBHI_ASID_MASK) == asid && (hi < end_hi ||
			    end == 0)) {
				/*
				 * If "hi" is a large page that spans
				 * "start_hi", then it must be invalidated.
				 */
				hi_pagemask = mips_rd_pagemask();
				if (hi >= (start_hi & ~(hi_pagemask <<
				    TLBMASK_SHIFT)))
					tlb_invalidate_one(i);
			}
		}
	}

	mips_wr_entryhi(save_asid);
	intr_restore(s);
}

/* XXX Only if DDB?  */
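/*
 * Snapshot the current CPU's TLB (up to MIPS_MAX_TLB_ENTRIES entries) into
 * tlb_state[] so that it can be inspected later, e.g. by the DDB command
 * below.
 */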
void
tlb_save(void)
{
	unsigned ntlb, i, cpu;

	cpu = PCPU_GET(cpuid);
	if (num_tlbentries > MIPS_MAX_TLB_ENTRIES)
		ntlb = MIPS_MAX_TLB_ENTRIES;
	else
		ntlb = num_tlbentries;
	tlb_state[cpu].wired = mips_rd_wired();
	for (i = 0; i < ntlb; i++) {
		mips_wr_index(i);
		tlb_read();

		tlb_state[cpu].entry[i].entryhi = mips_rd_entryhi();
		tlb_state[cpu].entry[i].pagemask = mips_rd_pagemask();
		tlb_state[cpu].entry[i].entrylo0 = mips_rd_entrylo0();
		tlb_state[cpu].entry[i].entrylo1 = mips_rd_entrylo1();
	}
}

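/*
 * If the TLB currently holds an entry for "va" in the pmap's address space,
 * rewrite the EntryLo half that covers "va" (Lo0 for the even page, Lo1 for
 * the odd page) with the new PTE, after masking off the software-only bits.
 */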
void
tlb_update(struct pmap *pmap, vm_offset_t va, pt_entry_t pte)
{
	register_t asid;
	register_t s;
	int i;

	va &= ~PAGE_MASK;
	pte &= ~TLBLO_SWBITS_MASK;

	s = intr_disable();
	asid = mips_rd_entryhi() & TLBHI_ASID_MASK;

	mips_wr_pagemask(0);
	mips_wr_entryhi(TLBHI_ENTRY(va, pmap_asid(pmap)));
	tlb_probe();
	i = mips_rd_index();
	if (i >= 0) {
		tlb_read();

		if ((va & PAGE_SIZE) == 0) {
			mips_wr_entrylo0(pte);
		} else {
			mips_wr_entrylo1(pte);
		}
		tlb_write_indexed();
	}

	mips_wr_entryhi(asid);
	intr_restore(s);
}

static void
tlb_invalidate_one(unsigned i)
{
	/* XXX an invalid ASID? */
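	/*
	 * Point the entry at a distinct address in unmapped KSEG0 space (one
	 * page pair per index) so that no two TLB entries ever share a
	 * virtual page number, and clear both EntryLo words so the entry can
	 * never produce a valid translation.
	 */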
	mips_wr_entryhi(TLBHI_ENTRY(MIPS_KSEG0_START + (2 * i * PAGE_SIZE), 0));
	mips_wr_entrylo0(0);
	mips_wr_entrylo1(0);
	mips_wr_pagemask(0);
	mips_wr_index(i);
	tlb_write_indexed();
}

#ifdef DDB
#include <ddb/ddb.h>

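/*
 * "show tlb [cpu]": dump the TLB contents saved by tlb_save() for the given
 * CPU (default: the current CPU).  If the selected CPU is the current one,
 * its TLB is re-read first.  The optional argument is typed as a decimal CPU
 * number even though DDB parses it as hex; the conversion below undoes that,
 * so e.g. "show tlb 12" selects CPU 12.
 */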
DB_SHOW_COMMAND(tlb, ddb_dump_tlb)
{
	register_t ehi, elo0, elo1, epagemask;
	unsigned i, cpu, ntlb;

	/*
	 * XXX
	 * The worst conversion from hex to decimal ever.
	 */
	if (have_addr)
		cpu = ((addr >> 4) % 16) * 10 + (addr % 16);
	else
		cpu = PCPU_GET(cpuid);

	if (cpu < 0 || cpu >= mp_ncpus) {
		db_printf("Invalid CPU %u\n", cpu);
		return;
	}
	if (num_tlbentries > MIPS_MAX_TLB_ENTRIES) {
		ntlb = MIPS_MAX_TLB_ENTRIES;
		db_printf("Warning: Only %d of %d TLB entries saved!\n",
		    ntlb, num_tlbentries);
	} else
		ntlb = num_tlbentries;

	if (cpu == PCPU_GET(cpuid))
		tlb_save();

	db_printf("Beginning TLB dump for CPU %u...\n", cpu);
	for (i = 0; i < ntlb; i++) {
		if (i == tlb_state[cpu].wired) {
			if (i != 0)
				db_printf("^^^ WIRED ENTRIES ^^^\n");
			else
				db_printf("(No wired entries.)\n");
		}

		/* XXX PageMask.  */
		ehi = tlb_state[cpu].entry[i].entryhi;
		elo0 = tlb_state[cpu].entry[i].entrylo0;
		elo1 = tlb_state[cpu].entry[i].entrylo1;
		epagemask = tlb_state[cpu].entry[i].pagemask;

		if (elo0 == 0 && elo1 == 0)
			continue;

		db_printf("#%u\t=> %jx (pagemask %jx)\n", i, (intmax_t)ehi,
		    (intmax_t)epagemask);
		db_printf(" Lo0\t%jx\t(%#jx)\n", (intmax_t)elo0,
		    (intmax_t)TLBLO_PTE_TO_PA(elo0));
		db_printf(" Lo1\t%jx\t(%#jx)\n", (intmax_t)elo1,
		    (intmax_t)TLBLO_PTE_TO_PA(elo1));
	}
	db_printf("Finished.\n");
}
#endif