/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"

#include <string.h>

#include <OS.h>

#include <arch/cpu.h>
#include <arch/x86/descriptors.h>
#include <arch_kernel.h>
#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <kernel.h>

#include "bios.h"
#include "interrupts.h"


/*!	The (physical) memory layout of the boot loader is currently as follows:
	  0x0500 - 0x10000	protected mode stack
	  0x0500 - 0x09000	real mode stack
	 0x10000 - ?		code (up to ~500 kB)
	 0x90000			1st temporary page table (identity maps 0-4 MB)
	 0x91000			2nd (4-8 MB)
	 0x92000 - 0x9e000	further page tables
	 0x9e000 - 0xa0000	SMP trampoline code
	[0xa0000 - 0x100000	BIOS/ROM/reserved area]
	0x100000			page directory
	     ...			boot loader heap (32 kB)
	     ...			free physical memory

	The first 8 MB are identity mapped (0x0 - 0x0800000); paging is turned
	on. The kernel is mapped at 0x80000000; everything else mapped by the
	loader (kernel args, modules, driver settings, ...) comes after
	0x81000000, which means that there are currently 16 MB reserved for
	the kernel itself (see kMaxKernelSize).

	The layout in PXE mode differs a bit from this, see definitions below.
*/

//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


//#define TRACE_MEMORY_MAP
	// Define this to print the memory map to serial debug.
	// You also need to define ENABLE_SERIAL in serial.cpp
	// for the output to work.


// memory structure returned by int 0x15, ax 0xe820
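// Each entry is 20 bytes: a 64-bit base address, a 64-bit length and a
// 32-bit type, where type 1 denotes usable RAM (see e820_memory_type()
// below for the other values).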
struct extended_memory {
	uint64 base_addr;
	uint64 length;
	uint32 type;
};


static const uint32 kDefaultPageTableFlags = 0x07;	// present, user, R/W
static const size_t kMaxKernelSize = 0x1000000;		// 16 MB for the kernel

// working page directory and page table
static uint32 *sPageDirectory = 0;

#ifdef _PXE_ENV

static addr_t sNextPhysicalAddress = 0x112000;
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE + kMaxKernelSize;

static addr_t sNextPageTableAddress = 0x7d000;
static const uint32 kPageTableRegionEnd = 0x8b000;
	// we need to reserve 2 pages for the SMP trampoline code

#else

static addr_t sNextPhysicalAddress = 0x100000;
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE + kMaxKernelSize;

static addr_t sNextPageTableAddress = 0x90000;
static const uint32 kPageTableRegionEnd = 0x9e000;
	// we need to reserve 2 pages for the SMP trampoline code

#endif


static addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += size;

	return address;
}


static addr_t
get_next_physical_address(size_t size)
{
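	// Look for a free physical range of the given size at or after
	// sNextPhysicalAddress and record it as allocated in the kernel args,
	// so that later allocations (and the kernel) won't reuse it.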
	uint64 base;
	if (!get_free_address_range(gKernelArgs.physical_allocated_range,
			gKernelArgs.num_physical_allocated_ranges, sNextPhysicalAddress,
			size, &base)) {
		panic("Out of physical memory!");
		return 0;
	}

	insert_physical_allocated_range(base, size);
	sNextPhysicalAddress = base + size;
		// TODO: Can overflow theoretically.

	return base;
}


static addr_t
get_next_virtual_page()
{
	return get_next_virtual_address(B_PAGE_SIZE);
}


static addr_t
get_next_physical_page()
{
	return get_next_physical_address(B_PAGE_SIZE);
}


static uint32 *
get_next_page_table()
{
	TRACE("get_next_page_table, sNextPageTableAddress %#" B_PRIxADDR
		", kPageTableRegionEnd %#" B_PRIxADDR "\n", sNextPageTableAddress,
		kPageTableRegionEnd);

	addr_t address = sNextPageTableAddress;
	if (address >= kPageTableRegionEnd)
		return (uint32 *)get_next_physical_page();

	sNextPageTableAddress += B_PAGE_SIZE;
	return (uint32 *)address;
}


/*!	Adds a new page table for the specified base address */
static uint32*
add_page_table(addr_t base)
{
	base = ROUNDDOWN(base, B_PAGE_SIZE * 1024);

	// Get new page table and clear it out
	uint32 *pageTable = get_next_page_table();
	if (pageTable > (uint32 *)(8 * 1024 * 1024)) {
		panic("tried to add page table beyond the identity mapped 8 MB "
			"region\n");
		return NULL;
	}

	TRACE("add_page_table(base = %p), got page: %p\n", (void*)base, pageTable);

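	// Record the table in the kernel args so the kernel knows about it and
	// can take it over when it sets up its own VM structures.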
	gKernelArgs.arch_args.pgtables[gKernelArgs.arch_args.num_pgtables++]
		= (uint32)pageTable;

	for (int32 i = 0; i < 1024; i++)
		pageTable[i] = 0;

	// put the new page table into the page directory
	sPageDirectory[base / (4 * 1024 * 1024)]
		= (uint32)pageTable | kDefaultPageTableFlags;

	// update the virtual end address in the kernel args
	base += B_PAGE_SIZE * 1024;
	if (base > gKernelArgs.arch_args.virtual_end)
		gKernelArgs.arch_args.virtual_end = base;

	return pageTable;
}


static void
unmap_page(addr_t virtualAddress)
{
	TRACE("unmap_page(virtualAddress = %p)\n", (void *)virtualAddress);

	if (virtualAddress < KERNEL_LOAD_BASE) {
		panic("unmap_page: asked to unmap invalid page %p!\n",
			(void *)virtualAddress);
	}

	// unmap the page from the correct page table
	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);
	pageTable[(virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE] = 0;

	asm volatile("invlpg (%0)" : : "r" (virtualAddress));
}


/*!	Creates an entry to map the specified virtualAddress to the given
	physicalAddress.
	If the mapping goes beyond the current page table, it will allocate
	a new one. If it cannot map the requested page, it panics.
*/
static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	TRACE("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress,
		physicalAddress);

	if (virtualAddress < KERNEL_LOAD_BASE) {
		panic("map_page: asked to map invalid page %p!\n",
			(void *)virtualAddress);
	}

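	// Each page directory entry covers 4 MB (1024 pages); its upper 20 bits
	// hold the physical address of the corresponding page table, which is
	// why the flag bits are masked off below.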
	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);

	if (pageTable == NULL) {
		// we need to add a new page table
		pageTable = add_page_table(virtualAddress);

		if (pageTable == NULL) {
			panic("map_page: failed to allocate a page table for virtual "
				"address %p\n", (void*)virtualAddress);
			return;
		}
	}

	physicalAddress &= ~(B_PAGE_SIZE - 1);

	// map the page to the correct page table
	uint32 tableEntry = (virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE;

	TRACE("map_page: inserting pageTable %p, tableEntry %" B_PRIu32
		", physicalAddress %#" B_PRIxADDR "\n", pageTable, tableEntry,
		physicalAddress);

	pageTable[tableEntry] = physicalAddress | flags;

	asm volatile("invlpg (%0)" : : "r" (virtualAddress));

	TRACE("map_page: done\n");
}


#ifdef TRACE_MEMORY_MAP
static const char *
e820_memory_type(uint32 type)
{
	switch (type) {
		case 1: return "memory";
		case 2: return "reserved";
		case 3: return "ACPI reclaim";
		case 4: return "ACPI NVS";
		default: return "unknown/reserved";
	}
}
#endif


static uint32
get_memory_map(extended_memory **_extendedMemory)
{
	extended_memory *block = (extended_memory *)kExtraSegmentScratch;
	bios_regs regs = {0, 0, sizeof(extended_memory), 0, 0, (uint32)block, 0, 0};
	uint32 count = 0;

	TRACE("get_memory_map()\n");

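	// BIOS function int 0x15, eax 0xe820: on entry EDX holds the 'SMAP'
	// signature, ECX the size of one entry and ES:DI the destination buffer;
	// EBX is the continuation value, which starts at 0 and becomes 0 again
	// once the last entry has been returned. The carry flag signals an error.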
	do {
		regs.eax = 0xe820;
		regs.edx = 'SMAP';

		call_bios(0x15, &regs);
		if ((regs.flags & CARRY_FLAG) != 0)
			return 0;

		regs.edi += sizeof(extended_memory);
		count++;
	} while (regs.ebx != 0);

	*_extendedMemory = block;

#ifdef TRACE_MEMORY_MAP
	dprintf("extended memory info (from 0xe820):\n");
	for (uint32 i = 0; i < count; i++) {
		dprintf("    base 0x%08Lx, len 0x%08Lx, type %lu (%s)\n",
			block[i].base_addr, block[i].length,
			block[i].type, e820_memory_type(block[i].type));
	}
#endif

	return count;
}


static void
init_page_directory(void)
{
	TRACE("init_page_directory\n");

	// allocate a new pgdir
	sPageDirectory = (uint32 *)get_next_physical_page();
	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;

	// clear out the pgdir
	for (int32 i = 0; i < 1024; i++) {
		sPageDirectory[i] = 0;
	}

	// Identity map the first 8 MB of memory so that its
	// physical and virtual addresses are the same.
	// These page tables won't be taken over into the kernel.

	// make the first page table at the first free spot
	uint32 *pageTable = get_next_page_table();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000) | kDefaultPageFlags;
	}

	sPageDirectory[0] = (uint32)pageTable | kDefaultPageFlags;

	// make the second page table
	pageTable = get_next_page_table();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000 + 0x400000) | kDefaultPageFlags;
	}

	sPageDirectory[1] = (uint32)pageTable | kDefaultPageFlags;

	gKernelArgs.arch_args.num_pgtables = 0;

	// switch to the new pgdir and enable paging
	asm("movl %0, %%eax;"
		"movl %%eax, %%cr3;" : : "m" (sPageDirectory) : "eax");
	// Important.  Make sure supervisor threads can fault on read only pages...
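	// CR0 bits set below: PE (bit 0, protected mode), NE (bit 5, native x87
	// error reporting), WP (bit 16, write protect in ring 0) and PG
	// (bit 31, paging).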
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
}


//	#pragma mark -


/*!
	Neither \a physicalAddress nor \a size needs to be aligned, but the
	function will map all pages the range intersects with.
	If \a physicalAddress is not page-aligned, the returned virtual address
	will have the same "misalignment".
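	For example (purely illustrative values): mapping \a physicalAddress
	0x1fe08123 with \a size 0x10 maps the single page at physical 0x1fe08000
	and returns the newly assigned virtual page address plus the 0x123 offset.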
*/
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	addr_t address = sNextVirtualAddress;
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;
	size += pageOffset;

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(get_next_virtual_page(), physicalAddress + offset, flags);
	}

	return address + pageOffset;
}


extern "C" void *
mmu_allocate(void *virtualAddress, size_t size)
{
	TRACE("mmu_allocate: requested vaddr: %p, next free vaddr: 0x%lx, size: "
		"%ld\n", virtualAddress, sNextVirtualAddress, size);

	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to map

	if (virtualAddress != NULL) {
		// This special path is almost only useful for loading the
		// kernel into memory; it will only allow you to map the
		// 'kMaxKernelSize' bytes following the kernel base address.
		// Also, it won't check for already mapped addresses, so
		// you better know why you are here :)
		addr_t address = (addr_t)virtualAddress;

		// is the address within the valid range?
		if (address < KERNEL_LOAD_BASE || address + size * B_PAGE_SIZE
				> KERNEL_LOAD_BASE + kMaxKernelSize)
			return NULL;

		for (uint32 i = 0; i < size; i++) {
			map_page(address, get_next_physical_page(), kDefaultPageFlags);
			address += B_PAGE_SIZE;
		}

		return virtualAddress;
	}

	void *address = (void *)sNextVirtualAddress;

	for (uint32 i = 0; i < size; i++) {
		map_page(get_next_virtual_page(), get_next_physical_page(),
			kDefaultPageFlags);
	}

	return address;
}


/*!	Allocates a single page and returns both its virtual and physical
	addresses.
*/
void *
mmu_allocate_page(addr_t *_physicalAddress)
{
	addr_t virt = get_next_virtual_page();
	addr_t phys = get_next_physical_page();

	map_page(virt, phys, kDefaultPageFlags);

	if (_physicalAddress)
		*_physicalAddress = phys;

	return (void *)virt;
}


/*!	Allocates the given physical range.
	\return \c true, if the range could be allocated, \c false otherwise.
*/
bool
mmu_allocate_physical(addr_t base, size_t size)
{
	// check whether the physical memory range exists at all
	if (!is_address_range_covered(gKernelArgs.physical_memory_range,
			gKernelArgs.num_physical_memory_ranges, base, size)) {
		return false;
	}

	// check whether the physical range is still free
	uint64 foundBase;
	if (!get_free_address_range(gKernelArgs.physical_allocated_range,
			gKernelArgs.num_physical_allocated_ranges, base, size, &foundBase)
		|| foundBase != base) {
		return false;
	}

	return insert_physical_allocated_range(base, size) == B_OK;
}


/*!	This will unmap the allocated chunk of memory from the virtual
	address space. Physical memory is not released, and the virtual address
	space is only reclaimed if the chunk sits at the very end of the
	allocated region (the implementation is very simple).
	Neither \a virtualAddress nor \a size need to be aligned, but the function
	will unmap all pages the range intersects with.
*/
extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
	TRACE("mmu_free(virtualAddress = %p, size: %ld)\n", virtualAddress, size);

	addr_t address = (addr_t)virtualAddress;
	addr_t pageOffset = address % B_PAGE_SIZE;
	address -= pageOffset;
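	// round the size up to whole pages, including the leading partial page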
	size = (size + pageOffset + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;

	// is the address within the valid range?
	if (address < KERNEL_LOAD_BASE || address + size > sNextVirtualAddress) {
		panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
			(void *)address, size);
	}

	// unmap all pages within the range
	for (size_t i = 0; i < size; i += B_PAGE_SIZE) {
		unmap_page(address);
		address += B_PAGE_SIZE;
	}

	if (address == sNextVirtualAddress) {
		// we can actually reuse the virtual address space
		sNextVirtualAddress -= size;
	}
}


size_t
mmu_get_virtual_usage()
{
	return sNextVirtualAddress - KERNEL_LOAD_BASE;
}


bool
mmu_get_virtual_mapping(addr_t virtualAddress, addr_t *_physicalAddress)
{
	if (virtualAddress < KERNEL_LOAD_BASE) {
		panic("mmu_get_virtual_mapping: asked to lookup invalid page %p!\n",
			(void *)virtualAddress);
	}

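	// Walk the two-level structure by hand: bit 0 of a directory or table
	// entry is the "present" bit, and the upper 20 bits hold the physical
	// frame address.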
	uint32 dirEntry = sPageDirectory[virtualAddress / (B_PAGE_SIZE * 1024)];
	if ((dirEntry & (1 << 0)) == 0)
		return false;

	uint32 *pageTable = (uint32 *)(dirEntry & 0xfffff000);
	uint32 tableEntry = pageTable[(virtualAddress % (B_PAGE_SIZE * 1024))
		/ B_PAGE_SIZE];
	if ((tableEntry & (1 << 0)) == 0)
		return false;

	*_physicalAddress = tableEntry & 0xfffff000;
	return true;
}


/*!	Sets up the final and kernel accessible GDT and IDT tables.
	BIOS calls won't work any longer after this function has
	been called.
*/
extern "C" void
mmu_init_for_kernel(void)
{
	TRACE("mmu_init_for_kernel\n");
	// set up a new idt
	{
		uint32 *idt;

		// find a new idt
		idt = (uint32 *)get_next_physical_page();
		gKernelArgs.arch_args.phys_idt = (uint32)idt;

		TRACE("idt at %p\n", idt);

		// map the idt into virtual space
		gKernelArgs.arch_args.vir_idt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_idt, (uint32)idt, kDefaultPageFlags);

		// initialize it
		interrupts_init_kernel_idt((void*)(addr_t)gKernelArgs.arch_args.vir_idt,
			IDT_LIMIT);

		TRACE("idt at virtual address 0x%llx\n", gKernelArgs.arch_args.vir_idt);
	}

	// set up a new gdt
	{
		struct gdt_idt_descr gdtDescriptor;
		segment_descriptor *gdt;

		// find a new gdt
		gdt = (segment_descriptor *)get_next_physical_page();
		gKernelArgs.arch_args.phys_gdt = (uint32)gdt;

		TRACE("gdt at %p\n", gdt);

		// map the gdt into virtual space
		gKernelArgs.arch_args.vir_gdt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_gdt, (uint32)gdt, kDefaultPageFlags);

		// put standard segment descriptors in it
		segment_descriptor* virtualGDT
			= (segment_descriptor*)(addr_t)gKernelArgs.arch_args.vir_gdt;
		clear_segment_descriptor(&virtualGDT[0]);
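		// The selector values in the comments below are index * 8, with the
		// low two bits holding the requested privilege level (hence 0x1b and
		// 0x23 for the ring 3 descriptors in slots 3 and 4).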

		// seg 0x08 - kernel 4GB code
		set_segment_descriptor(&virtualGDT[1], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_KERNEL);

		// seg 0x10 - kernel 4GB data
		set_segment_descriptor(&virtualGDT[2], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_KERNEL);

		// seg 0x1b - ring 3 user 4GB code
		set_segment_descriptor(&virtualGDT[3], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_USER);

		// seg 0x23 - ring 3 user 4GB data
		set_segment_descriptor(&virtualGDT[4], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_USER);

		// virtualGDT[5] and above will be filled later by the kernel
		// to contain the TSS descriptors, and for TLS (one for every CPU)

		// load the GDT
		gdtDescriptor.limit = GDT_LIMIT - 1;
		gdtDescriptor.base = (void*)(addr_t)gKernelArgs.arch_args.vir_gdt;

		asm("lgdt	%0;"
			: : "m" (gdtDescriptor));

		TRACE("gdt at virtual address %p\n",
			(void*)gKernelArgs.arch_args.vir_gdt);
	}

	// Save the memory we've virtually allocated (for the kernel and other
	// stuff)
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_LOAD_BASE;
	gKernelArgs.virtual_allocated_range[0].size
		= sNextVirtualAddress - KERNEL_LOAD_BASE;
	gKernelArgs.num_virtual_allocated_ranges = 1;

	// sort the address ranges
	sort_address_ranges(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
	sort_address_ranges(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

#ifdef TRACE_MEMORY_MAP
	{
		uint32 i;

		dprintf("phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			dprintf("    base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
				gKernelArgs.physical_memory_range[i].start,
				gKernelArgs.physical_memory_range[i].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			dprintf("    base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
				gKernelArgs.physical_allocated_range[i].start,
				gKernelArgs.physical_allocated_range[i].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			dprintf("    base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
				gKernelArgs.virtual_allocated_range[i].start,
				gKernelArgs.virtual_allocated_range[i].size);
		}
	}
#endif
}


extern "C" void
mmu_init(void)
{
	TRACE("mmu_init\n");

	gKernelArgs.arch_args.virtual_end = KERNEL_LOAD_BASE;

	gKernelArgs.physical_allocated_range[0].start = sNextPhysicalAddress;
	gKernelArgs.physical_allocated_range[0].size = 0;
	gKernelArgs.num_physical_allocated_ranges = 1;
		// remember the start of the allocated physical pages

	init_page_directory();

	// Map the page directory into kernel space at 0xffc00000-0xffffffff;
	// this enables an MMU trick where the 4 MB region that this pgdir entry
	// represents now maps the 4 MB of potential page tables that the pgdir
	// points to. Thrown away later in VM bringup, but useful for now.
	sPageDirectory[1023] = (uint32)sPageDirectory | kDefaultPageFlags;

	// also map it on the next vpage
	gKernelArgs.arch_args.vir_pgdir = get_next_virtual_page();
	map_page(gKernelArgs.arch_args.vir_pgdir, (uint32)sPageDirectory,
		kDefaultPageFlags);

	// map in a kernel stack
	gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL,
		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;

	TRACE("kernel stack at 0x%lx to 0x%lx\n", gKernelArgs.cpu_kstack[0].start,
		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size);

	extended_memory *extMemoryBlock;
	uint32 extMemoryCount = get_memory_map(&extMemoryBlock);

	// figure out the memory map
	if (extMemoryCount > 0) {
		gKernelArgs.num_physical_memory_ranges = 0;

		for (uint32 i = 0; i < extMemoryCount; i++) {
			// Type 1 is available memory
			if (extMemoryBlock[i].type == 1) {
				uint64 base = extMemoryBlock[i].base_addr;
				uint64 length = extMemoryBlock[i].length;
				uint64 end = base + length;

				// round everything up to page boundaries, exclusive of pages
				// it partially occupies
				base = ROUNDUP(base, B_PAGE_SIZE);
				end = ROUNDDOWN(end, B_PAGE_SIZE);

				// We ignore all memory beyond 4 GB, if phys_addr_t is only
				// 32 bit wide.
				#if B_HAIKU_PHYSICAL_BITS == 32
					if (end > 0x100000000ULL)
						end = 0x100000000ULL;
				#endif

				// Also ignore memory below 1 MB. Apparently some BIOSes fail to
				// provide the correct range type for some ranges (cf. #1925).
				// Later in the kernel we will reserve the range 0x0 - 0xa0000
				// and apparently 0xa0000 - 0x100000 never contain usable
				// memory, so we don't lose anything by doing that.
				if (base < 0x100000)
					base = 0x100000;

				gKernelArgs.ignored_physical_memory
					+= length - (max_c(end, base) - base);

				if (end <= base)
					continue;

				status_t status = insert_physical_memory_range(base, end - base);
				if (status == B_ENTRY_NOT_FOUND) {
					panic("mmu_init(): Failed to add physical memory range "
						"%#" B_PRIx64 " - %#" B_PRIx64 " : all %d entries are "
						"used already!\n", base, end, MAX_PHYSICAL_MEMORY_RANGE);
				} else if (status != B_OK) {
					panic("mmu_init(): Failed to add physical memory range "
						"%#" B_PRIx64 " - %#" B_PRIx64 "\n", base, end);
				}
			} else if (extMemoryBlock[i].type == 3) {
				// ACPI reclaim -- physical memory we could actually use later
				gKernelArgs.ignored_physical_memory += extMemoryBlock[i].length;
			}
		}

		// sort the ranges
		sort_address_ranges(gKernelArgs.physical_memory_range,
			gKernelArgs.num_physical_memory_ranges);

		// On some machines we get several ranges that contain only a few pages
		// (or even only one) each, which causes us to run out of MTRRs later.
		// So we remove all ranges smaller than 64 KB, hoping that this will
		// leave us only with a few larger contiguous ranges (ideally one).
		for (int32 i = gKernelArgs.num_physical_memory_ranges - 1; i >= 0;
				i--) {
			uint64 size = gKernelArgs.physical_memory_range[i].size;
			if (size < 64 * 1024) {
				uint64 start = gKernelArgs.physical_memory_range[i].start;
				remove_address_range(gKernelArgs.physical_memory_range,
					&gKernelArgs.num_physical_memory_ranges,
					MAX_PHYSICAL_MEMORY_RANGE, start, size);
			}
		}
	} else {
		bios_regs regs;

		// We don't have an extended map, assume memory is contiguously mapped
		// at 0x0, but leave out the BIOS range ((640k - 1 page) to 1 MB).
		gKernelArgs.physical_memory_range[0].start = 0;
		gKernelArgs.physical_memory_range[0].size = 0x9f000;
		gKernelArgs.physical_memory_range[1].start = 0x100000;

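		// int 0x15, ax 0xe801 reports the memory between 1 MB and 16 MB in KB
		// (CX/AX) and the memory above 16 MB in 64 KB blocks (DX/BX); the
		// older int 0x15, ah 0x88 fallback only reports up to about 64 MB of
		// extended memory above 1 MB in KB (AX).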
		regs.eax = 0xe801; // AX
		call_bios(0x15, &regs);
		if ((regs.flags & CARRY_FLAG) != 0) {
			regs.eax = 0x8800; // AH 88h
			call_bios(0x15, &regs);
			if ((regs.flags & CARRY_FLAG) != 0) {
				// TODO: for now!
				dprintf("No memory size - using 64 MB (fix me!)\n");
				uint32 memSize = 64 * 1024 * 1024;
				gKernelArgs.physical_memory_range[1].size = memSize - 0x100000;
			} else {
				dprintf("Get Extended Memory Size succeeded.\n");
				gKernelArgs.physical_memory_range[1].size = regs.eax * 1024;
			}
			gKernelArgs.num_physical_memory_ranges = 2;
		} else {
			dprintf("Get Memory Size for Large Configurations succeeded.\n");
			gKernelArgs.physical_memory_range[1].size = regs.ecx * 1024;
			gKernelArgs.physical_memory_range[2].start = 0x1000000;
			gKernelArgs.physical_memory_range[2].size = regs.edx * 64 * 1024;
			gKernelArgs.num_physical_memory_ranges = 3;
		}
	}

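	// The page hole is provided by the self-referencing directory entry set
	// up above (sPageDirectory[1023]): it makes all page tables visible in
	// the topmost 4 MB of the address space, at 0xffc00000 - 0xffffffff.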
	gKernelArgs.arch_args.page_hole = 0xffc00000;
}


//	#pragma mark -


extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection,
	bool /*exactAddress*/)
{
	void *address = mmu_allocate(*_address, size);
	if (address == NULL)
		return B_NO_MEMORY;

	*_address = address;
	return B_OK;
}


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	mmu_free(address, size);
	return B_OK;
}


void
platform_release_heap(struct stage2_args *args, void *base)
{
	// It will be freed automatically, since it is in the
	// identity mapped region, and not stored in the kernel's
	// page tables.
}


status_t
platform_init_heap(struct stage2_args *args, void **_base, void **_top)
{
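	// The heap is carved directly out of physical memory; that works because
	// the allocation is expected to fall within the identity mapped first
	// 8 MB, where physical and virtual addresses coincide.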
	void *heap = (void *)get_next_physical_address(args->heap_size);
	if (heap == NULL)
		return B_NO_MEMORY;

	*_base = heap;
	*_top = (void *)((int8 *)heap + args->heap_size);
	return B_OK;
}