/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>

#include <OS.h>

#include <string.h>

#include "arch_040_mmu.h"


//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


extern page_root_entry *gPageRoot;

//XXX: the milan BIOS uses the mmu for itself,
// likely to virtualize missing Atari I/O ports...
// tcr: c000 (enabled, 8k pages :()
// dtt0: 803fe140	0x80000000 & ~0x3f... en ignFC2 U=1 CI,S  RW
// dtt1: 403fe060	0x40000000 & ~0x3f... en ignFC2 U=0 CI,NS RW
// itt0: 803fe040	0x80000000 & ~0x3f... en ignFC2 U=0 CI,S  RW
// itt1: 403fe000	0x40000000 & ~0x3f... en ignFC2 U=0 C,WT  RW
// srp:  00dfde00
// urp:  00dfde00
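//
// This boot loader instead sets up the classic 68040 three-level
// translation tree: a 128-entry root table (pointed to by srp/urp),
// 128-entry directory tables and 64-entry page tables mapping 4 kB
// pages. While the tree is being built, the transparent translation
// registers keep RAM mapped one-to-one, so pa == va in the code below.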

static void
dump_mmu(void)
{
	uint32 dttr0, dttr1;
	uint32 ittr0, ittr1;
	uint32 srp, urp;
	uint32 tcr;

	TRACE(("mmu_040:dump:\n"));

	asm volatile("movec %%tcr,%0\n" : "=d"(tcr) :);
	TRACE(("tcr:\t%08lx\n", tcr));

	asm volatile("movec %%dtt0,%0\n" : "=d"(dttr0) :);
	TRACE(("dtt0:\t%08lx\n", dttr0));
	asm volatile("movec %%dtt1,%0\n" : "=d"(dttr1) :);
	TRACE(("dtt1:\t%08lx\n", dttr1));

	asm volatile("movec %%itt0,%0\n" : "=d"(ittr0) :);
	TRACE(("itt0:\t%08lx\n", ittr0));
	asm volatile("movec %%itt1,%0\n" : "=d"(ittr1) :);
	TRACE(("itt1:\t%08lx\n", ittr1));

	asm volatile("movec %%srp,%0\n" : "=d"(srp) :);
	TRACE(("srp:\t%08lx\n", srp));
	asm volatile("movec %%urp,%0\n" : "=d"(urp) :);
	TRACE(("urp:\t%08lx\n", urp));

	TRACE(("mmu_040:dump: done\n"));
}


static void
initialize(void)
{
	dump_mmu();
	TRACE(("mmu_040:initialize\n"));
}

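/**	Sets up one of the two transparent translation register pairs
 *	(dtt0/itt0 or dtt1/itt1) to map 'len' bytes at 'pa' one-to-one,
 *	or disables the pair when 'len' is 0.
 *	TTR bits: 31-24 address base, 23-16 address mask, 15 enable,
 *	14-13 supervisor/user mode, 9-8 user attributes, 6-5 cache mode,
 *	2 write protect; 0x0a000 means enabled, supervisor only,
 *	cacheable write-through, read/write.
 */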
static status_t
set_tt(int which, addr_t pa, size_t len, uint32 perms /* NOTUSED */)
{
	TRACE(("mmu_040:set_tt(%d, 0x%lx, 0x%lx, 0x%08lx)\n", which, pa, len, perms));
	uint32 mask;
	uint32 ttr = 0;
	mask = 0x0000ffff;
	if (len) {
		len = (len >> 24) & 0x00ff;
		while (len >>= 1)
			mask <<= 1;
		// enable, super only, upa=0,
		// cacheable write-through, rw
		ttr = 0x0a000;
		ttr |= (pa & 0xff000000);
		ttr |= (mask & 0x00ff0000);
	}
	TRACE(("mmu_040:set_tt: 0x%08lx\n", ttr));

	switch (which) {
		case 0:
			asm volatile(  \
				"movec %0,%%dtt0\n"				\
				"movec %0,%%itt0\n"				\
				: : "d"(ttr));
			break;
		case 1:
			asm volatile(  \
				"movec %0,%%dtt1\n"				\
				"movec %0,%%itt1\n"				\
				: : "d"(ttr));
			break;
		default:
			return EINVAL;
	}
	return B_OK;
}


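/**	Clears the page root table at 'pa' and installs it as both the
 *	supervisor and the user root pointer (srp/urp). The 68040 expects
 *	the root table to be 512 byte aligned (128 entries of 4 bytes).
 */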
static status_t
load_rp(addr_t pa)
{
	TRACE(("mmu_040:load_rp(0x%lx)\n", pa));
	// sanity check: the root table must be 512 byte aligned
	if (pa & ((1 << 9) - 1)) {
		panic("mmu root pointer misaligned!");
		return EINVAL;
	}
	// make sure it's empty
	page_root_entry *pr = (page_root_entry *)pa;
	for (int32 j = 0; j < NUM_ROOTENT_PER_TBL; j++)
		pr[j] = DFL_ROOTENT_VAL;

	/* mc68040 user's manual, 6-37 */
	/* pflush before... why not after ? */
	asm volatile(		   \
		"pflusha\n"		   \
		"movec %0,%%srp\n" \
		"movec %0,%%urp\n" \
		"pflusha\n"		   \
		: : "d"(pa));
	return B_OK;
}


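/**	Fills the upper half of the root table (the kernel part of the
 *	address space) with freshly cleared page directory tables,
 *	presumably so the kernel mappings can later be shared by every
 *	address space.
 */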
static status_t
allocate_kernel_pgdirs(void)
{
	page_root_entry *pr = gPageRoot;
	page_directory_entry *pd;
	addr_t tbl;
	int i;

	// we'll fill in the 2nd half with ready made page dirs
	for (i = NUM_ROOTENT_PER_TBL/2; i < NUM_ROOTENT_PER_TBL; i++) {
		if (i % NUM_DIRTBL_PER_PAGE)
			tbl += SIZ_DIRTBL;
		else
			tbl = mmu_get_next_page_tables();
		pr[i] = DT_ROOT | TA_TO_PREA(tbl);
		pd = (page_directory_entry *)tbl;
		for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
			pd[j] = DFL_DIRENT_VAL;
	}
	return B_OK;
}


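/**	Turns on address translation: sets the E bit in the TCR and leaves
 *	the P bit clear to select 4 kB pages (the Milan BIOS uses 8 kB
 *	pages, see the register dump above).
 */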
static status_t
enable_paging(void)
{
	TRACE(("mmu_040:enable_paging\n"));
	uint16 tcr = 0x8000; // Enable, 4K page size
	asm volatile( \
		"pflusha\n"		   \
		"movec %0,%%tcr\n" \
		"pflusha\n"		   \
		: : "d"(tcr));
	return B_OK;
}


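/**	Makes sure a page table exists for 'virtualAddress' by allocating
 *	and clearing one (a page worth of tables at a time) and hooking it
 *	into the matching page directory. The tables can be written to
 *	directly because the loader still runs with pa == va.
 */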
static status_t
add_page_table(addr_t virtualAddress)
{
	page_root_entry *pr = gPageRoot;
	page_directory_entry *pd;
	page_table_entry *pt;
	addr_t tbl;
	uint32 index;
	uint32 i;

	TRACE(("mmu->add_page_table(base = %p)\n", (void *)virtualAddress));

	// everything much simpler here because pa = va
	// thanks to transparent translation

	index = VADDR_TO_PRENT(virtualAddress);
	if (PRE_TYPE(pr[index]) != DT_ROOT)
		panic("invalid page root entry %d\n", index);
#if 0
	// not needed anymore
	if (PRE_TYPE(pr[index]) != DT_ROOT) {
		unsigned aindex = index & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
		//TRACE(("missing page root entry %d ai %d\n", index, aindex));
		tbl = mmu_get_next_page_tables();
		if (!tbl)
			return ENOMEM;
		// for each pgdir on the allocated page:
		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
			page_root_entry *apr = &pr[aindex + i];
			apr->addr = TA_TO_PREA(tbl);
			apr->type = DT_ROOT;
			//TRACE(("inserting tbl @ %p as %08x pr[%d] %08x\n", tbl, TA_TO_PREA(tbl), aindex + i, *(uint32 *)apr));
			// clear the table
			//TRACE(("clearing table[%d]\n", i));
			pd = (page_directory_entry *)tbl;
			for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
				*(page_directory_entry_scalar *)(&pd[j]) = DFL_DIRENT_VAL;
			tbl += SIZ_DIRTBL;
		}
	}
#endif
	pd = (page_directory_entry *)PRE_TO_TA(pr[index]);

	index = VADDR_TO_PDENT(virtualAddress);
	if (PDE_TYPE(pd[index]) != DT_DIR) {
		unsigned aindex = index & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
		//TRACE(("missing page dir entry %d ai %d\n", index, aindex));
		tbl = mmu_get_next_page_tables();
		if (!tbl)
			return ENOMEM;
		// for each page table on the allocated page:
		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
			page_directory_entry *apd = &pd[aindex + i];
			*apd = DT_DIR | TA_TO_PDEA(tbl);
			// clear the table
			//TRACE(("clearing table[%d]\n", i));
			pt = (page_table_entry *)tbl;
			for (int32 j = 0; j < NUM_PAGEENT_PER_TBL; j++)
				pt[j] = DFL_PAGEENT_VAL;
			tbl += SIZ_PAGETBL;
		}
	}
#if 0
	pt = PDE_TO_TA(pd[index]);

	index = VADDR_TO_PTENT(virtualAddress);
	pt[index].addr = TA_TO_PTEA(0xdeadb00b);
	pt[index].supervisor = 1;
	pt[index].type = DT_PAGE;
#endif
	return B_OK;
}


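/**	Walks the translation tree and returns a pointer to the page table
 *	entry for 'virtualAddress'. Panics if a root or directory entry is
 *	missing on the way.
 */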
static page_table_entry *
lookup_pte(addr_t virtualAddress)
{
	page_root_entry *pr = gPageRoot;
	page_directory_entry *pd;
	page_table_entry *pt;
	uint32 rindex, dindex, pindex;

	rindex = VADDR_TO_PRENT(virtualAddress);
	if (PRE_TYPE(pr[rindex]) != DT_ROOT)
		panic("lookup_pte: invalid entry pgrt[%d]", rindex);
	pd = (page_directory_entry *)PRE_TO_TA(pr[rindex]);

	dindex = VADDR_TO_PDENT(virtualAddress);
	if (PDE_TYPE(pd[dindex]) != DT_DIR)
		panic("lookup_pte: invalid entry pgrt[%d] prdir[%d]", rindex, dindex);
	pt = (page_table_entry *)PDE_TO_TA(pd[dindex]);

	pindex = VADDR_TO_PTENT(virtualAddress);
#if 0 // of course, it's used in map_page!
	if (PTE_TYPE(pt[pindex]) != DT_PAGE)
		panic("lookup_pte: invalid entry pgrt[%d] prdir[%d] pgtbl[%d]",
			rindex, dindex, pindex);
#endif

	return (&pt[pindex]);
}


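/**	Removes the mapping for 'virtualAddress' and flushes the matching
 *	ATC entry.
 */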
static void
unmap_page(addr_t virtualAddress)
{
	page_table_entry *pt;

	TRACE(("mmu->unmap_page(virtualAddress = %p)\n", (void *)virtualAddress));

	if (virtualAddress < KERNEL_BASE)
		panic("unmap_page: asked to unmap invalid page %p!\n",
			(void *)virtualAddress);

	// unmap the page from the correct page table
	pt = lookup_pte(virtualAddress);

	if (PTE_TYPE(*pt) != DT_PAGE)
		panic("unmap_page: asked to unmap a non-existing page for %08lx\n",
			virtualAddress);

	*pt = DT_INVALID | TA_TO_PTEA(0xdeadb00b);

	// flush ATC
	asm volatile("pflush (%0)" : : "a" (virtualAddress));
}


/** insert the physical address into existing page table */
static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	page_table_entry *pt;

	TRACE(("mmu->map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress, physicalAddress));

	physicalAddress &= ~(B_PAGE_SIZE - 1);

	// map the page to the correct page table

	pt = lookup_pte(virtualAddress);

	if (PTE_TYPE(*pt) != DT_INVALID)
		panic("map_page: asked to map existing page for %08lx\n",
			virtualAddress);

	TRACE(("map_page: inserting pageTableEntry %p, physicalAddress %p\n",
		pt, (void *)physicalAddress));

	*pt = DT_PAGE
		| TA_TO_PTEA(physicalAddress)
#ifdef MMU_HAS_GLOBAL_PAGES
		| M68K_PTE_GLOBAL
#endif
		| M68K_PTE_SUPERVISOR;
	// XXX: are flags needed ? ro ? global ?

	// flush ATC
	asm volatile("pflush (%0)" : : "a" (virtualAddress));

	TRACE(("mmu->map_page: done\n"));
}


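// The 040 implementation of the boot loader's MMU operations vector.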
const struct boot_mmu_ops k040MMUOps = {
	&initialize,
	&set_tt,
	&load_rp,
	&allocate_kernel_pgdirs,
	&enable_paging,
	&add_page_table,
	&unmap_page,
	&map_page
};