/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>
#include <AutoDeleter.h>

#include <OS.h>

#include <string.h>


struct MemoryRegion
{
	MemoryRegion* next;
	addr_t virtAdr;
	phys_addr_t physAdr;
	size_t size;
	uint32 protection;
};


extern uint8 gStackEnd;

uint8* gMemBase = NULL;
size_t gTotalMem = 0;
uint8* gFreeMem = &gStackEnd;
addr_t gFreeVirtMem = KERNEL_LOAD_BASE;

MemoryRegion* sRegions = NULL;

ssize_t gVirtFromPhysOffset = 0;
phys_addr_t sPageTable = 0;

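// Print the given PTE flag bits as a human-readable set for debug output.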
static void
WritePteFlags(uint32 flags)
{
	bool first = true;
	dprintf("{");
	for (uint32 i = 0; i < 32; i++) {
		if ((1 << i) & flags) {
			if (first) first = false; else dprintf(", ");
			switch (i) {
			case 0:  dprintf("valid"); break;
			case 1:  dprintf("read"); break;
			case 2:  dprintf("write"); break;
			case 3:  dprintf("exec"); break;
			case 4:  dprintf("user"); break;
			case 5:  dprintf("global"); break;
			case 6:  dprintf("accessed"); break;
			case 7:  dprintf("dirty"); break;
			default: dprintf("%" B_PRIu32, i);
			}
		}
	}
	dprintf("}");
}

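// Allocate `size` bytes of physical memory (rounded up to whole pages) from a
// simple bump allocator that starts at gStackEnd. Returns 0 if the request
// does not fit within gTotalMem.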
static phys_addr_t
AllocPhysPages(size_t size)
{
	size = ROUNDUP(size, B_PAGE_SIZE);
	phys_addr_t adr = ROUNDUP((addr_t)gFreeMem, B_PAGE_SIZE);

	if (adr + size - (addr_t)gMemBase > gTotalMem)
		return 0;

	gFreeMem = (uint8*)(adr + size);

	return adr;
}


static phys_addr_t
AllocPhysPage()
{
	return AllocPhysPages(B_PAGE_SIZE);
}

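// The bump allocator can only release the most recently allocated block; any
// other block is silently leaked.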
static void
FreePhysPages(phys_addr_t physAdr, size_t size)
{
	if (physAdr + size == (phys_addr_t)gFreeMem)
		gFreeMem -= size;
}

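// Reserve a page-aligned range of kernel virtual address space, handed out
// linearly starting at KERNEL_LOAD_BASE.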
static addr_t
AllocVirtPages(size_t size)
{
	size = ROUNDUP(size, B_PAGE_SIZE);
	addr_t adr = ROUNDUP(gFreeVirtMem, B_PAGE_SIZE);
	gFreeVirtMem = adr + size;

	return adr;
}

static void
FreeVirtPages(addr_t virtAdr, size_t size)
{
	if (virtAdr + size == gFreeVirtMem)
		gFreeVirtMem -= size;
}

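// The boot loader runs with physical and virtual addresses coinciding, so
// converting between the two is a plain cast (gVirtFromPhysOffset is 0).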
static inline void*
VirtFromPhys(phys_addr_t physAdr)
{
	return (void*)physAdr;
}


static inline phys_addr_t
PhysFromVirt(void* virtAdr)
{
	return (phys_addr_t)virtAdr;
}

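// Walk the three-level Sv39 page table and return a pointer to the leaf PTE
// for `virtAdr`. If `alloc` is true, missing intermediate page tables are
// allocated and zeroed; otherwise NULL is returned for unmapped addresses.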
static Pte*
LookupPte(addr_t virtAdr, bool alloc)
{
	Pte *pte = (Pte*)VirtFromPhys(sPageTable);
	for (int level = 2; level > 0; level--) {
		pte += VirtAdrPte(virtAdr, level);
		if (!pte->isValid) {
			if (!alloc)
				return NULL;
			uint64 ppn = AllocPhysPage() / B_PAGE_SIZE;
			if (ppn == 0)
				return NULL;
			memset((Pte*)VirtFromPhys(B_PAGE_SIZE * ppn), 0, B_PAGE_SIZE);
			Pte newPte {
				.isValid = true,
				.isGlobal = IS_KERNEL_ADDRESS(virtAdr),
				.ppn = ppn
			};
			pte->val = newPte.val;
		}
		pte = (Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn);
	}
	pte += VirtAdrPte(virtAdr, 0);
	return pte;
}

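// Enter a single page mapping from virtAdr to physAdr, combining the given
// protection flags with valid/accessed/dirty (and global for kernel addresses).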
static void
Map(addr_t virtAdr, phys_addr_t physAdr, uint64 flags)
{
	// dprintf("Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR ")\n", virtAdr, physAdr);
	Pte* pte = LookupPte(virtAdr, true);
	if (pte == NULL)
		panic("can't allocate page table");

	Pte newPte {
		.isValid = true,
		.isGlobal = IS_KERNEL_ADDRESS(virtAdr),
		.isAccessed = true,
		.isDirty = true,
		.ppn = physAdr / B_PAGE_SIZE
	};
	newPte.val |= flags;

	pte->val = newPte.val;
}


static void
MapRange(addr_t virtAdr, phys_addr_t physAdr, size_t size, uint64 flags)
{
	dprintf("MapRange(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR ", 0x%"
		B_PRIxADDR ", ", virtAdr, physAdr, size);
	WritePteFlags(flags);
	dprintf(")\n");
	for (size_t i = 0; i < size; i += B_PAGE_SIZE)
		Map(virtAdr + i, physAdr + i, flags);

	ASSERT_ALWAYS(insert_virtual_allocated_range(virtAdr, size) >= B_OK);
}


static void
MapRangeIdentity(addr_t adr, size_t size, uint64 flags)
{
	MapRange(adr, adr, size, flags);
}

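// Map a physical device range into newly allocated kernel virtual address
// space, update the range to its virtual location and record it so the kernel
// keeps the mapping.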
static void
MapAddrRange(addr_range& range, uint64 flags)
{
	phys_addr_t physAdr = range.start;
	range.start = AllocVirtPages(range.size);

	MapRange(range.start, physAdr, range.size, flags);

	if (gKernelArgs.arch_args.num_virtual_ranges_to_keep
		>= MAX_VIRTUAL_RANGES_TO_KEEP)
		panic("too many virtual ranges to keep");

	gKernelArgs.arch_args.virtual_ranges_to_keep[
		gKernelArgs.arch_args.num_virtual_ranges_to_keep++] = range;
}

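// Preallocate the root page table entries covering the kernel address range
// [KERNEL_BASE, KERNEL_TOP], so the top-level kernel entries are fixed before
// the kernel takes over.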
static void
PreallocKernelRange()
{
	Pte *root = (Pte*)VirtFromPhys(sPageTable);
	for (uint64 i = VirtAdrPte(KERNEL_BASE, 2); i <= VirtAdrPte(KERNEL_TOP, 2);
		i++) {
		Pte* pte = &root[i];
		uint64 ppn = AllocPhysPage() / B_PAGE_SIZE;
		if (ppn == 0)
			panic("can't alloc early physical page");
		memset(VirtFromPhys(B_PAGE_SIZE * ppn), 0, B_PAGE_SIZE);
		Pte newPte {
			.isValid = true,
			.isGlobal = true,
			.ppn = ppn
		};
		pte->val = newPte.val;
	}
}

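// Build the boot page table: a linear mapping of physical memory at the top of
// the kernel address space, an identity mapping for the boot loader itself,
// all regions allocated through platform_allocate_region() and the device
// registers needed early by the kernel.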
static void
SetupPageTable()
{
	sPageTable = AllocPhysPage();
	memset(VirtFromPhys(sPageTable), 0, B_PAGE_SIZE);

	PreallocKernelRange();

	// Physical memory mapping
	gKernelArgs.arch_args.physMap.size
		= gKernelArgs.physical_memory_range[0].size;
	gKernelArgs.arch_args.physMap.start = KERNEL_TOP + 1
		- gKernelArgs.arch_args.physMap.size;
	MapRange(gKernelArgs.arch_args.physMap.start,
		gKernelArgs.physical_memory_range[0].start,
		gKernelArgs.arch_args.physMap.size,
		Pte {.isRead = true, .isWrite = true}.val);

	// Boot loader
	MapRangeIdentity((addr_t)gMemBase, &gStackEnd - gMemBase,
		Pte {.isRead = true, .isWrite = true, .isExec = true}.val);

	// Memory regions
	MemoryRegion* region;
	for (region = sRegions; region != NULL; region = region->next) {
		Pte flags {
			.isRead  = (region->protection & B_READ_AREA)    != 0,
			.isWrite = (region->protection & B_WRITE_AREA)   != 0,
			.isExec  = (region->protection & B_EXECUTE_AREA) != 0
		};
		MapRange(region->virtAdr, region->physAdr, region->size, flags.val);
	}

	// Devices
	MapAddrRange(gKernelArgs.arch_args.clint,
		Pte {.isRead = true, .isWrite = true}.val);
	MapAddrRange(gKernelArgs.arch_args.htif,
		Pte {.isRead = true, .isWrite = true}.val);
	MapAddrRange(gKernelArgs.arch_args.plic,
		Pte {.isRead = true, .isWrite = true}.val);
	if (strcmp(gKernelArgs.arch_args.uart.kind, "") != 0) {
		MapAddrRange(gKernelArgs.arch_args.uart.regs,
			Pte {.isRead = true, .isWrite = true}.val);
	}
}

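// Compose the satp value that enables Sv39 translation using the freshly built
// root page table.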
static uint64
GetSatp()
{
	return SatpReg{
		.ppn = sPageTable / B_PAGE_SIZE,
		.asid = 0,
		.mode = satpModeSv39
	}.val;
}


//	#pragma mark -

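// Allocate a physical region plus a matching kernel virtual range and remember
// it in sRegions. The address returned to the caller is the physical one,
// which the identity-mapped boot loader can use directly; requests for an
// exact address are not supported.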
extern "C" status_t
platform_allocate_region(void** address, size_t size, uint8 protection,
	bool exactAddress)
{
	size = ROUNDUP(size, B_PAGE_SIZE);

	if (exactAddress)
		return B_ERROR;

	ObjectDeleter<MemoryRegion> region(new(std::nothrow) MemoryRegion());
	if (!region.IsSet())
		return B_NO_MEMORY;

	region->physAdr = AllocPhysPages(size);
	if (region->physAdr == 0)
		return B_NO_MEMORY;

	region->virtAdr = AllocVirtPages(size);
	region->size = size;
	region->protection = protection;

	*address = (void*)region->physAdr;

	region->next = sRegions;
	sRegions = region.Detach();

	return B_OK;
}


extern "C" status_t
platform_free_region(void* address, size_t size)
{
	MemoryRegion* prev = NULL;
	MemoryRegion* region = sRegions;
	while (region != NULL && region->physAdr != (phys_addr_t)address) {
		prev = region;
		region = region->next;
	}
	if (region == NULL) {
		panic("platform_free_region: address %p is not allocated\n", address);
		return B_ERROR;
	}
	FreePhysPages(region->physAdr, region->size);
	FreeVirtPages(region->virtAdr, region->size);
	if (prev == NULL)
		sRegions = region->next;
	else
		prev->next = region->next;

	delete region;

	return B_OK;
}


void
platform_release_heap(struct stage2_args* args, void* base)
{
}


status_t
platform_init_heap(struct stage2_args* args, void** _base, void** _top)
{
	addr_t heap = AllocPhysPages(args->heap_size);
	if (heap == 0)
		return B_NO_MEMORY;

	*_base = (void*)heap;
	*_top = (void*)(heap + args->heap_size);
	return B_OK;
}

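// Translate an address inside one of the allocated regions from its boot
// loader (physical) location to the kernel virtual address it will have once
// the kernel page table is active.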
status_t
platform_bootloader_address_to_kernel_address(void* address, addr_t* result)
{
	MemoryRegion* region = sRegions;
	while (region != NULL && !((phys_addr_t)address >= region->physAdr
		&& (phys_addr_t)address < region->physAdr + region->size))
		region = region->next;

	if (region == NULL)
		return B_ERROR;

	*result = (addr_t)address - region->physAdr + region->virtAdr;
	return B_OK;
}


status_t
platform_kernel_address_to_bootloader_address(addr_t address, void** result)
{
	MemoryRegion* region = sRegions;
	while (region != NULL && !(address >= region->virtAdr
		&& address < region->virtAdr + region->size))
		region = region->next;

	if (region == NULL)
		return B_ERROR;

	*result = (void*)(address - region->virtAdr + region->physAdr);
	return B_OK;
}


//	#pragma mark -

void
mmu_init(void)
{
}

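// Prepare everything the kernel needs to take over the MMU: a kernel stack,
// the physical/virtual range bookkeeping in gKernelArgs and the boot page
// table; satp receives the value to be loaded when entering the kernel.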
void
mmu_init_for_kernel(addr_t& satp)
{
	// map in a kernel stack
	void* stack_address = NULL;
	if (platform_allocate_region(&stack_address,
		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
		B_READ_AREA | B_WRITE_AREA, false)
		!= B_OK) {
		panic("Unable to allocate a stack");
	}
	gKernelArgs.cpu_kstack[0].start = fix_address((addr_t)stack_address);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
	dprintf("Kernel stack at %#lx\n", gKernelArgs.cpu_kstack[0].start);

	gKernelArgs.num_physical_memory_ranges = 0;
	insert_physical_memory_range((addr_t)gMemBase, gTotalMem);

	gKernelArgs.num_virtual_allocated_ranges = 0;
	gKernelArgs.arch_args.num_virtual_ranges_to_keep = 0;

	SetupPageTable();
	satp = GetSatp();
	dprintf("satp: %#" B_PRIx64 "\n", satp);

	gKernelArgs.num_physical_allocated_ranges = 0;
	insert_physical_allocated_range((addr_t)gMemBase, gFreeMem - gMemBase);

	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);
}