/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "amiga_memory_map.h"
#include "rom_calls.h"
#include "mmu.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>

#include <OS.h>

#include <string.h>


//XXX: x86
/** The (physical) memory layout of the boot loader is currently as follows:
 *	  0x0500 - 0x10000	protected mode stack
 *	  0x0500 - 0x09000	real mode stack
 *	 0x10000 - ?		code (up to ~500 kB)
 *	 0x90000			1st temporary page table (identity maps 0-4 MB)
 *	 0x91000			2nd (4-8 MB)
 *	 0x92000 - 0x92000	further page tables
 *	 0x9e000 - 0xa0000	SMP trampoline code
 *	[0xa0000 - 0x100000	BIOS/ROM/reserved area]
 *	0x100000			page directory
 *	     ...			boot loader heap (32 kB)
 *	     ...			free physical memory
 *
 *	The first 8 MB are identity mapped (0x0 - 0x0800000); paging is turned
 *	on. The kernel is mapped at 0x80000000, all other stuff mapped by the
 *	loader (kernel args, modules, driver settings, ...) comes after
 *	0x81000000 which means that there is currently only 1 MB reserved for
 *	the kernel itself (see kMaxKernelSize).
 */

// notes m68k:
/** The (physical) memory layout of the boot loader is currently as follows:
 *	  0x0800 - 0x10000	supervisor mode stack (1) XXX: more ? x86 starts at 500
 *	 0x10000 - ?		code (up to ~500 kB)
 *  0x100000 or FAST_RAM_BASE if any:
 *	     ...			page root directory
 *	     ...			interrupt vectors (VBR)
 *	     ...			page directory
 *	     ...			boot loader heap (32 kB)
 *	     ...			free physical memory
 *  0xdNNNNN			the video buffer is usually here, as per v_bas_ad
 *						(= Logbase(), but Physbase() is better)
 *
 *	The first 16 MB (2) are identity mapped (0x0 - 0x1000000); paging
 *	is turned on. The kernel is mapped at 0x80000000, all other stuff
 *	mapped by the loader (kernel args, modules, driver settings, ...)
 *	comes after 0x81000000 which means that there is currently only
 *	1 MB reserved for the kernel itself (see kMaxKernelSize).
 *
 *	(1) no need for a user stack, we are already in supervisor mode in the
 *	loader.
 *	(2) this maps the whole regular ST space; the transparent translation
 *	registers have larger granularity anyway.
 */
#warning M68K: check for Physbase() < ST_RAM_TOP

//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


// Since the page root directory doesn't take up a full page (only 1 kB),
// we put other data after it, like the interrupt vector table (also 1 kB).
#define VBR_PAGE_OFFSET 1024

static const uint32 kDefaultPageTableFlags = 0x07;	// present, user, R/W
static const size_t kMaxKernelSize = 0x100000;		// 1 MB for the kernel

// working page root directory
addr_t gPageRoot = 0;

static addr_t sNextPhysicalAddress = 0x100000;
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE + kMaxKernelSize;
static addr_t sMaxVirtualAddress = KERNEL_LOAD_BASE /*+ 0x400000*/;
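	// Physical and virtual memory are handed out by simple bump
	// allocators: physical pages from 0x100000 upwards, virtual pages
	// from the end of the kernel's 1 MB reservation. Only the most
	// recently allocated virtual range can be reclaimed (see mmu_free()).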

#if 0
static addr_t sNextPageTableAddress = 0x90000;
static const uint32 kPageTableRegionEnd = 0x9e000;
	// we need to reserve 2 pages for the SMP trampoline code XXX:no
#endif

static const struct boot_mmu_ops *gMMUOps;
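	// MMU-model specific operations (68030, 68040, ...), selected by
	// mmu_init() from gKernelArgs.arch_args.mmu_type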

static addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += size;

	TRACE(("%s(%ld): %08lx\n", __FUNCTION__, size, address));
	return address;
}


static addr_t
get_next_physical_address(size_t size)
{
	addr_t address = sNextPhysicalAddress;
	sNextPhysicalAddress += size;

	TRACE(("%s(%ld): %08lx\n", __FUNCTION__, size, address));
	return address;
}


static addr_t
get_next_virtual_page()
{
	TRACE(("%s\n", __FUNCTION__));
	return get_next_virtual_address(B_PAGE_SIZE);
}


static addr_t
get_next_physical_page()
{
	TRACE(("%s\n", __FUNCTION__));
	return get_next_physical_address(B_PAGE_SIZE);
}


// allocate a page worth of page dir or tables
extern "C" addr_t
mmu_get_next_page_tables()
{
#if 0
	TRACE(("mmu_get_next_page_tables, sNextPageTableAddress %p, kPageTableRegionEnd %p\n",
		(void *)sNextPageTableAddress, (void *)kPageTableRegionEnd));

	addr_t address = sNextPageTableAddress;
	if (address >= kPageTableRegionEnd)
		return get_next_physical_page();

	sNextPageTableAddress += B_PAGE_SIZE;
	return address;
#endif
	addr_t tbl = get_next_physical_page();
	if (!tbl)
		return tbl;
	// shouldn't we fill this ?
	//gKernelArgs.arch_args.pgtables[gKernelArgs.arch_args.num_pgtables++] = (uint32)pageTable;

#if 0
	// clear them
	uint32 *p = (uint32 *)tbl;
	for (int32 i = 0; i < 1024; i++)
		p[i] = 0;
#endif
	return tbl;
}

#if 0
/**	Adds a new page table for the specified base address */

static void
add_page_table(addr_t base)
{
	TRACE(("add_page_table(base = %p)\n", (void *)base));
#if 0

	// Get new page table and clear it out
	uint32 *pageTable = (uint32 *)mmu_get_next_page_tables();
	if (pageTable > (uint32 *)(8 * 1024 * 1024))
		panic("tried to add page table beyond the identity mapped 8 MB region\n");

	gKernelArgs.arch_args.pgtables[gKernelArgs.arch_args.num_pgtables++] = (uint32)pageTable;

	for (int32 i = 0; i < 1024; i++)
		pageTable[i] = 0;

	// put the new page table into the page directory
	gPageRoot[base/(4*1024*1024)] = (uint32)pageTable | kDefaultPageTableFlags;
#endif
}
#endif


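// Removes the mapping for the given (kernel) virtual address, via the
// MMU-model specific hook.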
static void
unmap_page(addr_t virtualAddress)
{
	gMMUOps->unmap_page(virtualAddress);
}


/** Creates an entry to map the specified virtualAddress to the given
 *	physicalAddress.
 *	If the mapping goes beyond the current page table, it will allocate
 *	a new one. If it cannot map the requested page, it panics.
 */

static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	TRACE(("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress, physicalAddress));

	if (virtualAddress < KERNEL_LOAD_BASE)
		panic("map_page: asked to map invalid page %p!\n", (void *)virtualAddress);

	// slow but I'm too lazy to fix the code below
	gMMUOps->add_page_table(virtualAddress);
#if 0
	if (virtualAddress >= sMaxVirtualAddress) {
		// we need to add a new page table

		gMMUOps->add_page_table(sMaxVirtualAddress);
		// 64 pages / page table
		sMaxVirtualAddress += B_PAGE_SIZE * 64;

		if (virtualAddress >= sMaxVirtualAddress)
			panic("map_page: asked to map a page to %p\n", (void *)virtualAddress);
	}
#endif

	physicalAddress &= ~(B_PAGE_SIZE - 1);

	// map the page to the correct page table
	gMMUOps->map_page(virtualAddress, physicalAddress, flags);
}


static void
init_page_directory(void)
{
	TRACE(("init_page_directory\n"));

	// allocate a new page root directory
	gPageRoot = get_next_physical_page();
	gKernelArgs.arch_args.phys_pgroot = (uint32)gPageRoot;
	gKernelArgs.arch_args.phys_vbr = (uint32)gPageRoot + VBR_PAGE_OFFSET;

	// set the root pointers
	gMMUOps->load_rp(gPageRoot);
	// allocate second level tables for kernel space
	// this will simplify the mmu code a lot, and only wastes 32 kB
	gMMUOps->allocate_kernel_pgdirs();
	// enable mmu translation
	gMMUOps->enable_paging();
	//XXX: check for errors

	//gKernelArgs.arch_args.num_pgtables = 0;
	gMMUOps->add_page_table(KERNEL_LOAD_BASE);

#if 0
	// clear out the pgdir
	for (int32 i = 0; i < 1024; i++) {
		gPageRoot[i] = 0;
	}

	// Identity map the first 8 MB of memory so that their
	// physical and virtual address are the same.
	// These page tables won't be taken over into the kernel.

	// make the first page table at the first free spot
	uint32 *pageTable = mmu_get_next_page_tables();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000) | kDefaultPageFlags;
	}

	gPageRoot[0] = (uint32)pageTable | kDefaultPageFlags;

	// make the second page table
	pageTable = mmu_get_next_page_tables();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000 + 0x400000) | kDefaultPageFlags;
	}

	gPageRoot[1] = (uint32)pageTable | kDefaultPageFlags;

	gKernelArgs.arch_args.num_pgtables = 0;
	add_page_table(KERNEL_LOAD_BASE);

	// switch to the new pgdir and enable paging
	asm("movl %0, %%eax;"
		"movl %%eax, %%cr3;" : : "m" (gPageRoot) : "eax");
	// Important. Make sure supervisor threads can fault on read only pages...
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
#endif
}


//	#pragma mark -


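/**	Maps the given physical address range into the virtual address space
 *	and returns the corresponding virtual address; the offset within the
 *	first page is preserved. E.g. (hypothetical values) mapping 0x200
 *	bytes at physical 0xdff002 maps whole pages starting at 0xdff000 and
 *	returns the allocated virtual page plus 0x002.
 */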
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	addr_t address = sNextVirtualAddress;
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;
	size += pageOffset;
		// make sure the end of the range is mapped, too

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(get_next_virtual_page(), physicalAddress + offset, flags);
	}

	return address + pageOffset;
}


extern "C" void *
mmu_allocate(void *virtualAddress, size_t size)
{
	TRACE(("mmu_allocate: requested vaddr: %p, next free vaddr: 0x%lx, size: %ld\n",
		virtualAddress, sNextVirtualAddress, size));

	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to map

	if (virtualAddress != NULL) {
		// This special path is almost only useful for loading the
		// kernel into memory; it will only allow you to map the
		// 1 MB following the kernel base address.
		// Also, it won't check for already mapped addresses, so
		// you'd better know why you are here :)
		addr_t address = (addr_t)virtualAddress;

		// is the address within the valid range?
		if (address < KERNEL_LOAD_BASE || address + size * B_PAGE_SIZE
			>= KERNEL_LOAD_BASE + kMaxKernelSize)
			return NULL;

		for (uint32 i = 0; i < size; i++) {
			map_page(address, get_next_physical_page(), kDefaultPageFlags);
			address += B_PAGE_SIZE;
		}

		TRACE(("mmu_allocate(KERNEL, %ld): done\n", size));
		return virtualAddress;
	}

	void *address = (void *)sNextVirtualAddress;

	for (uint32 i = 0; i < size; i++) {
		map_page(get_next_virtual_page(), get_next_physical_page(), kDefaultPageFlags);
	}

	TRACE(("mmu_allocate(NULL, %ld): %p\n", size, address));
	return address;
}


/**	This will unmap the allocated chunk of memory from the virtual
 *	address space. Whether the virtual range is actually reclaimed
 *	depends on the (very simple) implementation: only the most
 *	recently allocated range can be reused.
 */

extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
	TRACE(("mmu_free(virtualAddress = %p, size: %ld)\n", virtualAddress, size));

	addr_t address = (addr_t)virtualAddress;
	addr_t pageOffset = address % B_PAGE_SIZE;
	address -= pageOffset;
	size = (size + pageOffset + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;

	// is the address within the valid range?
	if (address < KERNEL_LOAD_BASE || address + size > sNextVirtualAddress) {
		panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
			(void *)address, size);
	}

	// unmap all pages within the range
	for (size_t i = 0; i < size; i += B_PAGE_SIZE) {
		unmap_page(address);
		address += B_PAGE_SIZE;
	}

	if (address == sNextVirtualAddress) {
		// we can actually reuse the virtual address space
		sNextVirtualAddress -= size;
	}
}


/**	Finishes the virtual memory setup for the kernel: it records the
 *	physically and virtually allocated ranges in the kernel args and
 *	sorts the address ranges. (The x86 version also installed the final
 *	GDT and IDT here; see the disabled block below.)
 */

extern "C" void
mmu_init_for_kernel(void)
{
	TRACE(("mmu_init_for_kernel\n"));

	// remove identity mapping of ST space
	// actually done by the kernel when it's done using query_early
	//gMMUOps->set_tt(0, NULL, 0, 0);

#if 0
	// set up a new idt
	{
		struct gdt_idt_descr idtDescriptor;
		uint32 *idt;

		// find a new idt
		idt = (uint32 *)get_next_physical_page();
		gKernelArgs.arch_args.phys_idt = (uint32)idt;

		TRACE(("idt at %p\n", idt));

		// map the idt into virtual space
		gKernelArgs.arch_args.vir_idt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_idt, (uint32)idt, kDefaultPageFlags);

		// clear it out
		uint32* virtualIDT = (uint32*)gKernelArgs.arch_args.vir_idt;
		for (int32 i = 0; i < IDT_LIMIT / 4; i++) {
			virtualIDT[i] = 0;
		}

		// load the idt
		idtDescriptor.limit = IDT_LIMIT - 1;
		idtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_idt;

		asm("lidt	%0;"
			: : "m" (idtDescriptor));

		TRACE(("idt at virtual address 0x%lx\n", gKernelArgs.arch_args.vir_idt));
	}

	// set up a new gdt
	{
		struct gdt_idt_descr gdtDescriptor;
		segment_descriptor *gdt;

		// find a new gdt
		gdt = (segment_descriptor *)get_next_physical_page();
		gKernelArgs.arch_args.phys_gdt = (uint32)gdt;

		TRACE(("gdt at %p\n", gdt));

		// map the gdt into virtual space
		gKernelArgs.arch_args.vir_gdt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_gdt, (uint32)gdt, kDefaultPageFlags);

		// put standard segment descriptors in it
		segment_descriptor* virtualGDT
			= (segment_descriptor*)gKernelArgs.arch_args.vir_gdt;
		clear_segment_descriptor(&virtualGDT[0]);

		// seg 0x08 - kernel 4GB code
		set_segment_descriptor(&virtualGDT[1], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_KERNEL);

		// seg 0x10 - kernel 4GB data
		set_segment_descriptor(&virtualGDT[2], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_KERNEL);

		// seg 0x1b - ring 3 user 4GB code
		set_segment_descriptor(&virtualGDT[3], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_USER);

		// seg 0x23 - ring 3 user 4GB data
		set_segment_descriptor(&virtualGDT[4], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_USER);

		// virtualGDT[5] and above will be filled later by the kernel
		// to contain the TSS descriptors, and for TLS (one for every CPU)

		// load the GDT
		gdtDescriptor.limit = GDT_LIMIT - 1;
		gdtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_gdt;

		asm("lgdt	%0;"
			: : "m" (gdtDescriptor));

		TRACE(("gdt at virtual address %p\n", (void *)gKernelArgs.arch_args.vir_gdt));
	}
#endif

	// save the memory we've physically allocated
	gKernelArgs.physical_allocated_range[0].size
		= sNextPhysicalAddress - gKernelArgs.physical_allocated_range[0].start;

	// save the memory we've virtually allocated (for the kernel and other stuff)
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_LOAD_BASE;
	gKernelArgs.virtual_allocated_range[0].size
		= sNextVirtualAddress - KERNEL_LOAD_BASE;
	gKernelArgs.num_virtual_allocated_ranges = 1;

	// sort the address ranges
	sort_address_ranges(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
	sort_address_ranges(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

#ifdef TRACE_MMU
	{
		uint32 i;

		dprintf("phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			dprintf("    base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				gKernelArgs.physical_memory_range[i].start,
				gKernelArgs.physical_memory_range[i].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			dprintf("    base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				gKernelArgs.physical_allocated_range[i].start,
				gKernelArgs.physical_allocated_range[i].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			dprintf("    base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				gKernelArgs.virtual_allocated_range[i].start,
				gKernelArgs.virtual_allocated_range[i].size);
		}
	}
#endif
}


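/**	Selects the MMU-model specific operations according to the MMU type
 *	detected by the platform code, and lets them initialize themselves.
 */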
extern "C" void
mmu_init(void)
{
	TRACE(("mmu_init\n"));
	switch (gKernelArgs.arch_args.mmu_type) {
#if 0
		case 68851:
			gMMUOps = &k851MMUOps;
			break;
#endif
		case 68030:
			gMMUOps = &k030MMUOps;
			break;
		case 68040:
			gMMUOps = &k040MMUOps;
			break;
#if 0
		case 68060:
			gMMUOps = &k060MMUOps;
			break;
#endif
		default:
			panic("unknown mmu type %d\n", gKernelArgs.arch_args.mmu_type);
	}

	gMMUOps->initialize();

#if 0
	addr_t fastram_top = 0;
	if (*TOSVARramvalid == TOSVARramvalid_MAGIC)
		fastram_top = *TOSVARramtop;
	if (fastram_top) {
		// we have some fastram, use it first
		sNextPhysicalAddress = AMIGA_FASTRAM_BASE;
	}

	gKernelArgs.physical_allocated_range[0].start = sNextPhysicalAddress;
	gKernelArgs.physical_allocated_range[0].size = 0;
	gKernelArgs.num_physical_allocated_ranges = 1;
		// remember the start of the allocated physical pages

	// enable transparent translation of the first 256 MB
	gMMUOps->set_tt(0, AMIGA_CHIPRAM_BASE, 0x10000000, 0);
	// enable transparent translation of the 16 MB ST shadow range for I/O
	gMMUOps->set_tt(1, AMIGA_SHADOW_BASE, 0x01000000, 0);

	init_page_directory();
#if 0//XXX:HOLE

	// Map the page directory into kernel space at 0xffc00000-0xffffffff
	// this enables a mmu trick where the 4 MB region that this pgdir entry
	// represents now maps the 4 MB of potential pagetables that the pgdir
	// points to. Thrown away later in VM bringup, but useful for now.
	gPageRoot[1023] = (uint32)gPageRoot | kDefaultPageFlags;
#endif

	// also map it on the next vpage
	gKernelArgs.arch_args.vir_pgroot = get_next_virtual_page();
	map_page(gKernelArgs.arch_args.vir_pgroot, (uint32)gPageRoot, kDefaultPageFlags);

	// set virtual addr for interrupt vector table
	gKernelArgs.arch_args.vir_vbr = gKernelArgs.arch_args.vir_pgroot
		+ VBR_PAGE_OFFSET;

	// map in a kernel stack
	gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL,
		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;

	TRACE(("kernel stack at 0x%lx to 0x%lx\n", gKernelArgs.cpu_kstack[0].start,
		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));

	// chip ram as 1st range
	gKernelArgs.physical_memory_range[0].start = AMIGA_CHIPRAM_BASE;
	gKernelArgs.physical_memory_range[0].size = *TOSVARphystop - AMIGA_CHIPRAM_BASE;
	gKernelArgs.num_physical_memory_ranges = 1;

	// fast ram as 2nd range
	if (fastram_top) {
		gKernelArgs.physical_memory_range[1].start = AMIGA_FASTRAM_BASE;
		gKernelArgs.physical_memory_range[1].size
			= fastram_top - AMIGA_FASTRAM_BASE;
		gKernelArgs.num_physical_memory_ranges++;
	}

	// mark the video area allocated
	addr_t video_base = *TOSVAR_memtop;
	video_base &= ~(B_PAGE_SIZE - 1);
	gKernelArgs.physical_allocated_range
		[gKernelArgs.num_physical_allocated_ranges].start = video_base;
	gKernelArgs.physical_allocated_range
		[gKernelArgs.num_physical_allocated_ranges].size
		= *TOSVARphystop - video_base;
	gKernelArgs.num_physical_allocated_ranges++;

#endif
}


//	#pragma mark -


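/**	Platform API: allocates a memory region for the loader. A non-NULL
 *	*_address requests a specific address in the kernel's 1 MB range
 *	(see mmu_allocate()); the exactAddress flag is currently ignored.
 */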
extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection,
	bool /*exactAddress*/)
{
	void *address = mmu_allocate(*_address, size);
	if (address == NULL)
		return B_NO_MEMORY;

	*_address = address;
	return B_OK;
}


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	mmu_free(address, size);
	return B_OK;
}


void
platform_release_heap(struct stage2_args *args, void *base)
{
	// It will be freed automatically, since it is in the
	// identity mapped region, and not stored in the kernel's
	// page tables.
}


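/**	Carves the loader heap out of the identity mapped physical memory,
 *	so no explicit mapping is needed (see platform_release_heap() above).
 */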
status_t
platform_init_heap(struct stage2_args *args, void **_base, void **_top)
{
	void *heap = (void *)get_next_physical_address(args->heap_size);
	if (heap == NULL)
		return B_NO_MEMORY;

	*_base = heap;
	*_top = (void *)((int8 *)heap + args->heap_size);
	return B_OK;
}