/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "arch_mmu.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <arm_mmu.h>
#include <kernel.h>

#include <board_config.h>

#include <OS.h>

#include <string.h>


//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define TRACE_MEMORY_MAP
	// Define this to print the memory map to serial debug;
	// you also need to define ENABLE_SERIAL in serial.cpp
	// for the output to work.


/*
TODO:
	- recycle bit!
*/

/*!	The (physical) memory layout of the boot loader is currently as follows:
	 0x00000000			u-boot (run from NOR flash)
	 0xa0000000			u-boot data such as kernel arguments (AFAIK)
	 0xa0100000 - 0xa0ffffff	boot.tgz (up to 15 MB; probably never needs to be that big)
	 0xa1000000 - 0xa1ffffff	page tables
	 0xa2000000 - ?			code (up to 1 MB)
	 0xa2100000			boot loader heap / free physical memory

	The kernel is mapped at KERNEL_BASE; all other stuff mapped by the
	loader (kernel args, modules, driver settings, ...) comes after
	KERNEL_BASE + kMaxKernelSize, which means that there is currently only
	2 MB reserved for the kernel itself (see kMaxKernelSize).
*/


/*!	Defines a block of memory that the boot loader will map. */
struct memblock {
	const char name[16];
		// the name is used for debugging, and to look up
		// specific regions in mmu_init()
	addr_t	start;
		// start of the block
	addr_t	end;
		// end of the block
	uint32	flags;
		// which MMU flags should be applied (device/normal etc.)
};


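/*!	The boot loader's static memory map: every block listed here is
	identity mapped (virtual == physical) by init_page_directory(). The
	"RAM_*" entries are additionally used by mmu_init() to locate the page
	table region, the first free physical page, and the extent of RAM.
*/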
static struct memblock LOADER_MEMORYMAP[] = {
	{
		"devices",
		DEVICE_BASE,
		DEVICE_BASE + DEVICE_SIZE - 1,
		ARM_MMU_L2_FLAG_B,
	},
	{
		"RAM_loader", // 1MB loader
		SDRAM_BASE + 0,
		SDRAM_BASE + 0x0fffff,
		ARM_MMU_L2_FLAG_C,
	},
	{
		"RAM_pt", // 1MB page tables
		SDRAM_BASE + 0x100000,
		SDRAM_BASE + 0x1FFFFF,
		ARM_MMU_L2_FLAG_C,
	},
	{
		"RAM_free", // 16MB free RAM (there is more, but we don't map it automatically)
		SDRAM_BASE + 0x0200000,
		SDRAM_BASE + 0x11FFFFF,
		ARM_MMU_L2_FLAG_C,
	},
	{
		"RAM_stack", // stack
		SDRAM_BASE + 0x1200000,
		SDRAM_BASE + 0x2000000,
		ARM_MMU_L2_FLAG_C,
	},
	{
		"RAM_initrd", // initrd
		SDRAM_BASE + 0x2000000,
		SDRAM_BASE + 0x2500000,
		ARM_MMU_L2_FLAG_C,
	},

#ifdef FB_BASE
	{
		"framebuffer", // 2MB framebuffer ram
		FB_BASE,
		FB_BASE + FB_SIZE - 1,
		ARM_MMU_L2_FLAG_AP_RW | ARM_MMU_L2_FLAG_C,
	},
#endif
};


//static const uint32 kDefaultPageTableFlags = MMU_FLAG_READWRITE;
	// not cached, not buffered, R/W
static const size_t kMaxKernelSize = 0x200000;		// 2 MB for the kernel

static addr_t sNextPhysicalAddress = 0; // will be set by mmu_init
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;

static addr_t sNextPageTableAddress = 0;
// the page directory is located in front of the page tables
static uint32 kPageTableRegionEnd = 0;

// working page directory and page table
static uint32 *sPageDirectory = 0;
// the page directory has to be aligned to a 16 KB boundary, since the
// translation table base register ignores the lower bits (see mmu_set_TTBR())


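/*!	Simple bump allocators for virtual and physical address space. Addresses
	are handed out strictly increasing and are normally never given back;
	the only exception is mmu_free(), which can rewind the virtual counter
	for the most recently allocated range.
*/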
static addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += size;

	return address;
}


static addr_t
get_next_virtual_address_alligned(size_t size, uint32 mask)
{
	addr_t address = (sNextVirtualAddress) & mask;
	sNextVirtualAddress = address + size;

	return address;
}


static addr_t
get_next_physical_address(size_t size)
{
	addr_t address = sNextPhysicalAddress;
	sNextPhysicalAddress += size;

	return address;
}


static addr_t
get_next_physical_address_alligned(size_t size, uint32 mask)
{
	addr_t address = sNextPhysicalAddress & mask;
	sNextPhysicalAddress = address + size;

	return address;
}


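/*!	Returns the next page-sized chunk of the virtual respectively physical
	address space. Note that the 0xffffffc0 mask below only enforces 64-byte
	alignment; these helpers rely on the counters already being page aligned.
*/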
static addr_t
get_next_virtual_page(size_t pagesize)
{
	return get_next_virtual_address_alligned(pagesize, 0xffffffc0);
}


static addr_t
get_next_physical_page(size_t pagesize)
{
	return get_next_physical_address_alligned(pagesize, 0xffffffc0);
}


/*
 * Set translation table base
 */
void
mmu_set_TTBR(uint32 ttb)
{
	ttb &= 0xffffc000;
	asm volatile("MCR p15, 0, %[adr], c2, c0, 0"::[adr] "r" (ttb));
}


/*
 * Flush the TLB
 */
void
mmu_flush_TLB()
{
	uint32 value = 0;
	asm volatile("MCR p15, 0, %[c8format], c8, c7, 0"::[c8format] "r" (value));
}


/*
 * Read MMU Control Register
 */
uint32
mmu_read_C1()
{
	uint32 controlReg = 0;
	asm volatile("MRC p15, 0, %[c1out], c1, c0, 0":[c1out] "=r" (controlReg));
	return controlReg;
}


/*
 * Write MMU Control Register
 */
void
mmu_write_C1(uint32 value)
{
	asm volatile("MCR p15, 0, %[c1in], c1, c0, 0"::[c1in] "r" (value));
}


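/*
 * Write Domain Access Control Register
 */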
void
mmu_write_DACR(uint32 value)
{
	asm volatile("MCR p15, 0, %[c1in], c3, c0, 0"::[c1in] "r" (value));
}


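/*!	Allocates the next L2 page table of the given \a type and clears its
	entries. Tables are normally carved out of the dedicated page table
	region ("RAM_pt"); if that region is exhausted, the table is taken from
	the general pool of free physical memory instead.
*/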
static uint32 *
get_next_page_table(uint32 type)
{
	TRACE(("get_next_page_table, sNextPageTableAddress 0x%" B_PRIxADDR
		", kPageTableRegionEnd 0x%" B_PRIxADDR ", type 0x%" B_PRIx32 "\n",
		sNextPageTableAddress, kPageTableRegionEnd, type));

	size_t size = 0;
	size_t entryCount = 0;
	switch (type) {
		case ARM_MMU_L1_TYPE_COARSE:
			size = ARM_MMU_L2_COARSE_TABLE_SIZE;
			entryCount = ARM_MMU_L2_COARSE_ENTRY_COUNT;
			break;
		case ARM_MMU_L1_TYPE_FINE:
			size = ARM_MMU_L2_FINE_TABLE_SIZE;
			entryCount = ARM_MMU_L2_FINE_ENTRY_COUNT;
			break;
		case ARM_MMU_L1_TYPE_SECTION:
			// TODO: Figure out parameters for section types.
			size = 16384;
			break;
		default:
			panic("asked for unknown page table type: %#" B_PRIx32 "\n", type);
			return NULL;
	}

	addr_t address = sNextPageTableAddress;
	if (address < kPageTableRegionEnd)
		sNextPageTableAddress += size;
	else {
		TRACE(("page table allocation outside of pagetable region!\n"));
		address = get_next_physical_address_alligned(size, 0xffffffc0);
	}

	uint32 *pageTable = (uint32 *)address;
	for (size_t i = 0; i < entryCount; i++)
		pageTable[i] = 0;

	return pageTable;
}


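/*!	Returns the L2 page table covering \a address, allocating a new one of
	the given \a type and hooking it into the page directory if the
	corresponding entry is still unused. Panics if the entry already exists
	with a different type.
*/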
static uint32 *
get_or_create_page_table(addr_t address, uint32 type)
{
	uint32 *pageTable = NULL;
	uint32 pageDirectoryIndex = VADDR_TO_PDENT(address);
	uint32 pageDirectoryEntry = sPageDirectory[pageDirectoryIndex];

	uint32 entryType = pageDirectoryEntry & ARM_PDE_TYPE_MASK;
	if (entryType == ARM_MMU_L1_TYPE_FAULT) {
		// This page directory entry has not been set yet, allocate it.
		pageTable = get_next_page_table(type);
		sPageDirectory[pageDirectoryIndex] = (uint32)pageTable | type;
		return pageTable;
	}

	if (entryType != type) {
		// This entry has been allocated with a different type!
		panic("tried to reuse page directory entry %" B_PRIu32
			" with different type (entry: %#" B_PRIx32 ", new type: %#" B_PRIx32
			")\n", pageDirectoryIndex, pageDirectoryEntry, type);
		return NULL;
	}

	return (uint32 *)(pageDirectoryEntry & ARM_PDE_ADDRESS_MASK);
}


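/*!	Builds the initial (identity mapped) page directory from
	LOADER_MEMORYMAP, maps the page directory itself into the kernel's
	virtual address space, and finally enables the MMU with all domains set
	to manager access.
*/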
void
init_page_directory()
{
	TRACE(("init_page_directory\n"));
	uint32 smallType;

	// see if subpages are disabled
	if (mmu_read_C1() & (1 << 23))
		smallType = ARM_MMU_L2_TYPE_SMALLNEW;
	else
		smallType = ARM_MMU_L2_TYPE_SMALLEXT;

	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;

	// clear out the page directory
	for (uint32 i = 0; i < ARM_MMU_L1_TABLE_ENTRY_COUNT; i++)
		sPageDirectory[i] = 0;

	for (uint32 i = 0; i < ARRAY_SIZE(LOADER_MEMORYMAP); i++) {

		TRACE(("BLOCK: %s START: %lx END %lx\n", LOADER_MEMORYMAP[i].name,
			LOADER_MEMORYMAP[i].start, LOADER_MEMORYMAP[i].end));

		addr_t address = LOADER_MEMORYMAP[i].start;
		ASSERT((address & ~ARM_PTE_ADDRESS_MASK) == 0);

		uint32 *pageTable = NULL;
		uint32 pageTableIndex = 0;

		while (address < LOADER_MEMORYMAP[i].end) {
			if (pageTable == NULL
				|| pageTableIndex >= ARM_MMU_L2_COARSE_ENTRY_COUNT) {
				pageTable = get_or_create_page_table(address,
					ARM_MMU_L1_TYPE_COARSE);
				pageTableIndex = VADDR_TO_PTENT(address);
			}

			pageTable[pageTableIndex++]
				= address | LOADER_MEMORYMAP[i].flags | smallType;
			address += B_PAGE_SIZE;
		}
	}

	// Map the page directory itself.
	addr_t virtualPageDirectory = mmu_map_physical_memory(
		(addr_t)sPageDirectory, ARM_MMU_L1_TABLE_SIZE, kDefaultPageFlags);

	mmu_flush_TLB();

	/* set up the translation table base */
	mmu_set_TTBR((uint32)sPageDirectory);

	mmu_flush_TLB();

	/* set up the domain access register */
	mmu_write_DACR(0xFFFFFFFF);

	/* turn on the mmu */
	mmu_write_C1(mmu_read_C1() | 0x1);

	// Use the mapped page directory from now on.
	sPageDirectory = (uint32 *)virtualPageDirectory;
	gKernelArgs.arch_args.vir_pgdir = virtualPageDirectory;
}



/*!	Creates an entry to map the specified virtualAddress to the given
	physicalAddress.
	If the mapping goes beyond the current page table, it will allocate
	a new one. If it cannot map the requested page, it panics.
*/
static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	TRACE(("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress,
		physicalAddress));

	if (virtualAddress < KERNEL_BASE) {
		panic("map_page: asked to map invalid page %p!\n",
			(void *)virtualAddress);
	}

	physicalAddress &= ~(B_PAGE_SIZE - 1);

	// map the page to the correct page table
	uint32 *pageTable = get_or_create_page_table(virtualAddress,
		ARM_MMU_L1_TYPE_COARSE);

	uint32 pageTableIndex = VADDR_TO_PTENT(virtualAddress);
	TRACE(("map_page: inserting pageTable %p, tableEntry 0x%" B_PRIx32
		", physicalAddress 0x%" B_PRIxADDR "\n", pageTable, pageTableIndex,
		physicalAddress));

	pageTable[pageTableIndex] = physicalAddress | flags;

	mmu_flush_TLB();

	TRACE(("map_page: done\n"));
}


//	#pragma mark -


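/*!	Maps the given physical range at the next free virtual addresses and
	returns the virtual address corresponding to \a physicalAddress
	(including its offset into the first page). Used, for instance, to map
	the page directory itself in init_page_directory().
*/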
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	addr_t address = sNextVirtualAddress;
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(get_next_virtual_page(B_PAGE_SIZE), physicalAddress + offset,
			flags);
	}

	return address + pageOffset;
}


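/*!	Removes the page table entry for \a virtualAddress and flushes the TLB.
	The virtual address range itself is not recycled here; see mmu_free().
*/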
static void
unmap_page(addr_t virtualAddress)
{
	TRACE(("unmap_page(virtualAddress = %p)\n", (void *)virtualAddress));

	if (virtualAddress < KERNEL_BASE) {
		panic("unmap_page: asked to unmap invalid page %p!\n",
			(void *)virtualAddress);
	}

	// unmap the page from the correct page table
	uint32 *pageTable
		= (uint32 *)(sPageDirectory[VADDR_TO_PDENT(virtualAddress)]
			& ARM_PDE_ADDRESS_MASK);

	pageTable[VADDR_TO_PTENT(virtualAddress)] = 0;

	mmu_flush_TLB();
}


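/*!	Allocates and maps \a size bytes (rounded up to whole pages) of physical
	memory. If \a virtualAddress is NULL, the pages are mapped at the next
	free virtual addresses; otherwise they are mapped exactly there, which
	must lie within the kernel's reserved range.
*/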
extern "C" void *
mmu_allocate(void *virtualAddress, size_t size)
{
	TRACE(("mmu_allocate: requested vaddr: %p, next free vaddr: 0x%lx, size: "
		"%ld\n", virtualAddress, sNextVirtualAddress, size));

	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to map

	if (virtualAddress != NULL) {
		// This special path is almost only useful for loading the
		// kernel into memory; it will only allow you to map the
		// 'kMaxKernelSize' bytes following the kernel base address.
		// Also, it won't check for already mapped addresses, so
		// you better know why you are here :)
		addr_t address = (addr_t)virtualAddress;

		// is the address within the valid range?
		if (address < KERNEL_BASE
			|| address + size >= KERNEL_BASE + kMaxKernelSize) {
			TRACE(("mmu_allocate in illegal range\n address: %" B_PRIx32
				"  KERNELBASE: %" B_PRIx32 " KERNEL_BASE + kMaxKernelSize: %"
				B_PRIx32 "  address + size : %" B_PRIx32 "\n", (uint32)address,
				(uint32)KERNEL_BASE, (uint32)KERNEL_BASE + kMaxKernelSize,
				(uint32)(address + size)));
			return NULL;
		}
		for (uint32 i = 0; i < size; i++) {
			map_page(address, get_next_physical_page(B_PAGE_SIZE),
				kDefaultPageFlags);
			address += B_PAGE_SIZE;
		}

		return virtualAddress;
	}

	void *address = (void *)sNextVirtualAddress;

	for (uint32 i = 0; i < size; i++) {
		map_page(get_next_virtual_page(B_PAGE_SIZE),
			get_next_physical_page(B_PAGE_SIZE), kDefaultPageFlags);
	}

	return address;
}


/*!	Unmaps the given chunk of memory from the virtual address space. The
	underlying physical pages are not returned to the allocator; only the
	virtual address range may be reused, and only if it was the most
	recently allocated one.
*/
extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
	TRACE(("mmu_free(virtualAddress = %p, size: %ld)\n", virtualAddress, size));

	addr_t address = (addr_t)virtualAddress;
	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to unmap

	// is the address within the valid range?
	if (address < KERNEL_BASE
		|| address + size >= KERNEL_BASE + kMaxKernelSize) {
		panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
			(void *)address, size);
	}

	// unmap all pages within the range
	for (uint32 i = 0; i < size; i++) {
		unmap_page(address);
		address += B_PAGE_SIZE;
	}

	if (address == sNextVirtualAddress) {
		// we can actually reuse the virtual address space
		sNextVirtualAddress -= size;
	}
}


/*!	Records the boot loader's physical and virtual allocations in the
	kernel args, so that the kernel knows which ranges are already in use
	when it takes over.
*/
extern "C" void
mmu_init_for_kernel(void)
{
	TRACE(("mmu_init_for_kernel\n"));

	// save the memory we've physically allocated
	gKernelArgs.physical_allocated_range[0].size
		= sNextPhysicalAddress - gKernelArgs.physical_allocated_range[0].start;

	// Save the memory we've virtually allocated (for the kernel and other
	// stuff)
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
	gKernelArgs.virtual_allocated_range[0].size
		= sNextVirtualAddress - KERNEL_BASE;
	gKernelArgs.num_virtual_allocated_ranges = 1;

#ifdef TRACE_MEMORY_MAP
	{
		uint32 i;

		dprintf("phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			dprintf("    base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				gKernelArgs.physical_memory_range[i].start,
				gKernelArgs.physical_memory_range[i].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			dprintf("    base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				gKernelArgs.physical_allocated_range[i].start,
				gKernelArgs.physical_allocated_range[i].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			dprintf("    base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				gKernelArgs.virtual_allocated_range[i].start,
				gKernelArgs.virtual_allocated_range[i].size);
		}
	}
#endif
}


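/*!	Sets up the loader's initial memory management: disables the MMU,
	derives the physical memory layout and page table region from
	LOADER_MEMORYMAP, builds and activates the page directory, and maps a
	kernel stack for the boot CPU.
*/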
extern "C" void
mmu_init(void)
{
	TRACE(("mmu_init\n"));

	mmu_write_C1(mmu_read_C1() & ~((1 << 29) | (1 << 28) | (1 << 0)));
		// access flag disabled, TEX remap disabled, mmu disabled

	uint32 highestRAMAddress = SDRAM_BASE;

	// Figure out the page table region, the first free physical page and
	// the highest RAM address from the LOADER_MEMORYMAP.
	for (uint32 i = 0; i < ARRAY_SIZE(LOADER_MEMORYMAP); i++) {
		if (strcmp("RAM_free", LOADER_MEMORYMAP[i].name) == 0)
			sNextPhysicalAddress = LOADER_MEMORYMAP[i].start;

		if (strcmp("RAM_pt", LOADER_MEMORYMAP[i].name) == 0) {
			sNextPageTableAddress = LOADER_MEMORYMAP[i].start
				+ ARM_MMU_L1_TABLE_SIZE;
			kPageTableRegionEnd = LOADER_MEMORYMAP[i].end;
			sPageDirectory = (uint32 *)LOADER_MEMORYMAP[i].start;
		}

		if (strncmp("RAM_", LOADER_MEMORYMAP[i].name, 4) == 0) {
			if (LOADER_MEMORYMAP[i].end > highestRAMAddress)
				highestRAMAddress = LOADER_MEMORYMAP[i].end;
		}
	}

	gKernelArgs.physical_memory_range[0].start = SDRAM_BASE;
	gKernelArgs.physical_memory_range[0].size = highestRAMAddress - SDRAM_BASE;
	gKernelArgs.num_physical_memory_ranges = 1;

	gKernelArgs.physical_allocated_range[0].start = SDRAM_BASE;
	gKernelArgs.physical_allocated_range[0].size = 0;
	gKernelArgs.num_physical_allocated_ranges = 1;
		// remember the start of the allocated physical pages

	init_page_directory();

	// map in a kernel stack
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
	gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL,
		gKernelArgs.cpu_kstack[0].size);

	TRACE(("kernel stack at 0x%" B_PRIx64 " to 0x%" B_PRIx64 "\n",
		gKernelArgs.cpu_kstack[0].start,
		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));
}


//	#pragma mark -


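/*!	Platform hook used by the generic loader code to allocate a memory
	region; simply forwards to mmu_allocate(). The protection and
	exactAddress arguments are currently ignored.
*/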
extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection,
	bool /*exactAddress*/)
{
	void *address = mmu_allocate(*_address, size);
	if (address == NULL)
		return B_NO_MEMORY;

	*_address = address;
	return B_OK;
}


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	mmu_free(address, size);
	return B_OK;
}


void
platform_release_heap(struct stage2_args *args, void *base)
{
	// It will be freed automatically, since it is in the
	// identity mapped region, and not stored in the kernel's
	// page tables.
}


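/*!	Carves the boot loader heap out of the identity mapped physical memory,
	which is why platform_release_heap() above does not need to do anything
	to get rid of it again.
*/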
status_t
platform_init_heap(struct stage2_args *args, void **_base, void **_top)
{
	void *heap = (void *)get_next_physical_address(args->heap_size);
	if (heap == NULL)
		return B_NO_MEMORY;

	*_base = heap;
	*_top = (void *)((int8 *)heap + args->heap_size);
	return B_OK;
}