/*
 * Copyright 2008-2010, François Revol, revol@free.fr. All rights reserved.
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "atari_memory_map.h"
#include "toscalls.h"
#include "mmu.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>

#include <OS.h>

#include <string.h>


//XXX: x86
/** The (physical) memory layout of the boot loader is currently as follows:
 *	  0x0500 - 0x10000	protected mode stack
 *	  0x0500 - 0x09000	real mode stack
 *	 0x10000 - ?		code (up to ~500 kB)
 *	 0x90000			1st temporary page table (identity maps 0-4 MB)
 *	 0x91000			2nd (4-8 MB)
 *	 0x92000 - 0x92000	further page tables
 *	 0x9e000 - 0xa0000	SMP trampoline code
 *	[0xa0000 - 0x100000	BIOS/ROM/reserved area]
 *	0x100000			page directory
 *	     ...			boot loader heap (32 kB)
 *	     ...			free physical memory
 *
 *	The first 8 MB are identity mapped (0x0 - 0x0800000); paging is turned
 *	on. The kernel is mapped at 0x80000000, all other stuff mapped by the
 *	loader (kernel args, modules, driver settings, ...) comes after
 *	0x81000000 which means that there is currently only 1 MB reserved for
 *	the kernel itself (see kMaxKernelSize).
 */

// notes m68k:
/** The (physical) memory layout of the boot loader is currently as follows:
 *	  0x0800 - 0x10000	supervisor mode stack (1) XXX: more ? x86 starts at 500
 *	 0x10000 - ?		code (up to ~500 kB)
 *  0x100000 or FAST_RAM_BASE if any:
 *	     ...			page root directory
 *	     ...			interrupt vectors (VBR)
 *	     ...			page directory
 *	     ...			boot loader heap (32 kB)
 *	     ...			free physical memory
 *  0xdNNNNN			video buffer usually there, as per v_bas_ad
 *						(=Logbase() but Physbase() is better)
 *
 *	The first 16 MB (2) are identity mapped (0x0 - 0x1000000); paging
 *	is turned on. The kernel is mapped at 0x80000000, all other stuff
 *	mapped by the loader (kernel args, modules, driver settings, ...)
 *	comes after 0x81000000 which means that there are currently only
 *	2 MB reserved for the kernel itself (see kMaxKernelSize).
 *
 *	(1) no need for a user stack, we are already in supervisor mode in the
 *	loader.
 *	(2) maps the whole regular ST space; the transparent translation
 *	registers have larger granularity anyway.
 */
#warning M68K: check for Physbase() < ST_RAM_TOP

#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


// Since the page root directory doesn't take up a full page (1 kB is enough),
// we put other data right after it, like the interrupt vector table (1 kB).
#define VBR_PAGE_OFFSET 1024
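	// (the vector table itself holds 256 4-byte vectors, i.e. 1 kB, so both
	// fit easily in a single page)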

static const uint32 kDefaultPageTableFlags = 0x07;	// present, user, R/W
static const size_t kMaxKernelSize = 0x200000;		// 2 MB for the kernel

// working page directory and page table
addr_t gPageRoot = 0;

static addr_t sNextPhysicalAddress = 0x100000;
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE + kMaxKernelSize;
static addr_t sMaxVirtualAddress = KERNEL_LOAD_BASE /*+ 0x400000*/;

#if 0
static addr_t sNextPageTableAddress = 0x90000;
static const uint32 kPageTableRegionEnd = 0x9e000;
	// we need to reserve 2 pages for the SMP trampoline code XXX:no
#endif

static const struct boot_mmu_ops *gMMUOps;


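// Simple bump allocators: virtual and physical addresses are handed out
// sequentially and are never given back (except when mmu_free() releases
// the most recently allocated virtual range).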
static addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += size;

	TRACE(("%s(%d): %08x\n", __FUNCTION__, size, address));
	return address;
}


static addr_t
get_next_physical_address(size_t size)
{
	addr_t address = sNextPhysicalAddress;
	sNextPhysicalAddress += size;

	TRACE(("%s(%d): %08x\n", __FUNCTION__, size, address));
	return address;
}


static addr_t
get_next_virtual_page()
{
	TRACE(("%s\n", __FUNCTION__));
	return get_next_virtual_address(B_PAGE_SIZE);
}


static addr_t
get_next_physical_page()
{
	TRACE(("%s\n", __FUNCTION__));
	return get_next_physical_address(B_PAGE_SIZE);
}


// allocate a page worth of page dir or tables
extern "C" addr_t
mmu_get_next_page_tables()
{
#if 0
	TRACE(("mmu_get_next_page_tables, sNextPageTableAddress %p, kPageTableRegionEnd %p\n",
		sNextPageTableAddress, kPageTableRegionEnd));

	addr_t address = sNextPageTableAddress;
	if (address >= kPageTableRegionEnd)
		return (uint32 *)get_next_physical_page();

	sNextPageTableAddress += B_PAGE_SIZE;
	return (uint32 *)address;
#endif
	addr_t tbl = get_next_physical_page();
	if (!tbl)
		return tbl;
	// shouldn't we fill this ?
	//gKernelArgs.arch_args.pgtables[gKernelArgs.arch_args.num_pgtables++] = (uint32)pageTable;

#if 0
	// clear them
	uint32 *p = (uint32 *)tbl;
	for (int32 i = 0; i < 1024; i++)
		p[i] = 0;
#endif
	return tbl;
}

#if 0
/**	Adds a new page table for the specified base address */

static void
add_page_table(addr_t base)
{
	TRACE(("add_page_table(base = %p)\n", (void *)base));
#if 0

	// Get new page table and clear it out
	uint32 *pageTable = mmu_get_next_page_tables();
	if (pageTable > (uint32 *)(8 * 1024 * 1024))
		panic("tried to add page table beyond the identity mapped 8 MB region\n");

	gKernelArgs.arch_args.pgtables[gKernelArgs.arch_args.num_pgtables++] = (uint32)pageTable;

	for (int32 i = 0; i < 1024; i++)
		pageTable[i] = 0;

	// put the new page table into the page directory
	gPageRoot[base/(4*1024*1024)] = (uint32)pageTable | kDefaultPageTableFlags;
#endif
}
#endif


static void
unmap_page(addr_t virtualAddress)
{
	gMMUOps->unmap_page(virtualAddress);
}


/** Creates an entry to map the specified virtualAddress to the given
 *	physicalAddress.
 *	If the mapping goes beyond the current page table, it will allocate
 *	a new one. If it cannot map the requested page, it panics.
 */

static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	TRACE(("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress, physicalAddress));

	if (virtualAddress < KERNEL_LOAD_BASE)
		panic("map_page: asked to map invalid page %p!\n", (void *)virtualAddress);

	// slow but I'm too lazy to fix the code below
	gMMUOps->add_page_table(virtualAddress);
#if 0
	if (virtualAddress >= sMaxVirtualAddress) {
		// we need to add a new page table

		gMMUOps->add_page_table(sMaxVirtualAddress);
		// 64 pages / page table
		sMaxVirtualAddress += B_PAGE_SIZE * 64;

		if (virtualAddress >= sMaxVirtualAddress)
			panic("map_page: asked to map a page to %p\n", (void *)virtualAddress);
	}
#endif

	physicalAddress &= ~(B_PAGE_SIZE - 1);

	// map the page to the correct page table
	gMMUOps->map_page(virtualAddress, physicalAddress, flags);
}


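/**	Allocates the page root directory (with the interrupt vector table in
 *	the same page), loads it into the MMU, allocates the kernel space page
 *	directories, enables paging and adds a first page table for
 *	KERNEL_LOAD_BASE.
 */
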
static void
init_page_directory(void)
{
	TRACE(("init_page_directory\n"));

	// allocate a new pg root dir
	gPageRoot = get_next_physical_page();
	gKernelArgs.arch_args.phys_pgroot = (uint32)gPageRoot;
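	// the interrupt vector table lives in the same page, just after the
	// root directory (see VBR_PAGE_OFFSET)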
	gKernelArgs.arch_args.phys_vbr = (uint32)gPageRoot + VBR_PAGE_OFFSET;

	// set the root pointers
	gMMUOps->load_rp(gPageRoot);
	// allocate second level tables for kernel space
	// this will simplify mmu code a lot, and only wastes 32KB
	gMMUOps->allocate_kernel_pgdirs();
	// enable mmu translation
	gMMUOps->enable_paging();
	//XXX: check for errors

	//gKernelArgs.arch_args.num_pgtables = 0;
	gMMUOps->add_page_table(KERNEL_LOAD_BASE);

#if 0


	// clear out the pgdir
	for (int32 i = 0; i < 1024; i++) {
		gPageRoot[i] = 0;
	}

	// Identity map the first 8 MB of memory so that their
	// physical and virtual address are the same.
	// These page tables won't be taken over into the kernel.

	// make the first page table at the first free spot
	uint32 *pageTable = mmu_get_next_page_tables();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000) | kDefaultPageFlags;
	}

	gPageRoot[0] = (uint32)pageTable | kDefaultPageFlags;

	// make the second page table
	pageTable = mmu_get_next_page_tables();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000 + 0x400000) | kDefaultPageFlags;
	}

	gPageRoot[1] = (uint32)pageTable | kDefaultPageFlags;

	gKernelArgs.arch_args.num_pgtables = 0;
	add_page_table(KERNEL_LOAD_BASE);

	// switch to the new pgdir and enable paging
	asm("movl %0, %%eax;"
		"movl %%eax, %%cr3;" : : "m" (gPageRoot) : "eax");
	// Important.  Make sure supervisor threads can fault on read only pages...
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
#endif
}


//	#pragma mark -


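/**	Maps the given physical memory range into the loader's virtual address
 *	space and returns the virtual address corresponding to physicalAddress
 *	(the offset within the page is preserved).
 */
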
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	addr_t address = sNextVirtualAddress;
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(get_next_virtual_page(), physicalAddress + offset, flags);
	}

	return address + pageOffset;
}


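/**	Allocates physical pages for 'size' bytes and maps them. If
 *	virtualAddress is NULL, the pages are mapped at the next free virtual
 *	addresses; otherwise they are mapped at the requested address, which
 *	must lie within the kernel's reserved range (see kMaxKernelSize).
 */
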
extern "C" void *
mmu_allocate(void *virtualAddress, size_t size)
{
	TRACE(("mmu_allocate: requested vaddr: %p, next free vaddr: 0x%lx, size: %ld\n",
		virtualAddress, sNextVirtualAddress, size));

	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to map

	if (virtualAddress != NULL) {
		// This special path is almost only useful for loading the
		// kernel into memory; it will only allow you to map the
		// kMaxKernelSize range following the kernel base address.
		// Also, it won't check for already mapped addresses, so
		// you better know why you are here :)
		addr_t address = (addr_t)virtualAddress;

		// is the address within the valid range?
		if (address < KERNEL_LOAD_BASE || address + size * B_PAGE_SIZE
			>= KERNEL_LOAD_BASE + kMaxKernelSize)
			return NULL;

		for (uint32 i = 0; i < size; i++) {
			map_page(address, get_next_physical_page(), kDefaultPageFlags);
			address += B_PAGE_SIZE;
		}

		TRACE(("mmu_allocate(KERNEL, %d): done\n", size));
		return virtualAddress;
	}

	void *address = (void *)sNextVirtualAddress;

	for (uint32 i = 0; i < size; i++) {
		map_page(get_next_virtual_page(), get_next_physical_page(), kDefaultPageFlags);
	}

	TRACE(("mmu_allocate(NULL, %d): %p\n", size, address));
	return address;
}


/**	This will unmap the allocated chunk of memory from the virtual
 *	address space. It might not actually release the underlying physical
 *	memory (the implementation is very simple), but it does reuse the
 *	virtual address space when the chunk lies at the end of the allocated
 *	range.
 */

extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
	TRACE(("mmu_free(virtualAddress = %p, size: %ld)\n", virtualAddress, size));

	addr_t address = (addr_t)virtualAddress;
	addr_t pageOffset = address % B_PAGE_SIZE;
	address -= pageOffset;
	size = (size + pageOffset + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;

	// is the address within the valid range?
	if (address < KERNEL_LOAD_BASE || address + size > sNextVirtualAddress) {
		panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
			(void *)address, size);
	}

	// unmap all pages within the range
	for (size_t i = 0; i < size; i += B_PAGE_SIZE) {
		unmap_page(address);
		address += B_PAGE_SIZE;
	}

	if (address == sNextVirtualAddress) {
		// we can actually reuse the virtual address space
		sNextVirtualAddress -= size;
	}
}


/**	Prepares the MMU related kernel args for the kernel: the physical and
 *	virtual ranges the loader has allocated are recorded and sorted.
 */

extern "C" void
mmu_init_for_kernel(void)
{
	TRACE(("mmu_init_for_kernel\n"));

	// remove identity mapping of ST space
	// actually done by the kernel when it's done using query_early
	//gMMUOps->set_tt(0, NULL, 0, 0);

#if 0
	// set up a new idt
	{
		struct gdt_idt_descr idtDescriptor;
		uint32 *idt;

		// find a new idt
		idt = (uint32 *)get_next_physical_page();
		gKernelArgs.arch_args.phys_idt = (uint32)idt;

		TRACE(("idt at %p\n", idt));

		// map the idt into virtual space
		gKernelArgs.arch_args.vir_idt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_idt, (uint32)idt, kDefaultPageFlags);

		// clear it out
		uint32* virtualIDT = (uint32*)gKernelArgs.arch_args.vir_idt;
		for (int32 i = 0; i < IDT_LIMIT / 4; i++) {
			virtualIDT[i] = 0;
		}

		// load the idt
		idtDescriptor.limit = IDT_LIMIT - 1;
		idtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_idt;

		asm("lidt	%0;"
			: : "m" (idtDescriptor));

		TRACE(("idt at virtual address 0x%lx\n", gKernelArgs.arch_args.vir_idt));
	}

	// set up a new gdt
	{
		struct gdt_idt_descr gdtDescriptor;
		segment_descriptor *gdt;

		// find a new gdt
		gdt = (segment_descriptor *)get_next_physical_page();
		gKernelArgs.arch_args.phys_gdt = (uint32)gdt;

		TRACE(("gdt at %p\n", gdt));

		// map the gdt into virtual space
		gKernelArgs.arch_args.vir_gdt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_gdt, (uint32)gdt, kDefaultPageFlags);

		// put standard segment descriptors in it
		segment_descriptor* virtualGDT
			= (segment_descriptor*)gKernelArgs.arch_args.vir_gdt;
		clear_segment_descriptor(&virtualGDT[0]);

		// seg 0x08 - kernel 4GB code
		set_segment_descriptor(&virtualGDT[1], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_KERNEL);

		// seg 0x10 - kernel 4GB data
		set_segment_descriptor(&virtualGDT[2], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_KERNEL);

		// seg 0x1b - ring 3 user 4GB code
		set_segment_descriptor(&virtualGDT[3], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_USER);

		// seg 0x23 - ring 3 user 4GB data
		set_segment_descriptor(&virtualGDT[4], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_USER);

		// virtualGDT[5] and above will be filled later by the kernel
		// to contain the TSS descriptors, and for TLS (one for every CPU)

		// load the GDT
		gdtDescriptor.limit = GDT_LIMIT - 1;
		gdtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_gdt;

		asm("lgdt	%0;"
			: : "m" (gdtDescriptor));

		TRACE(("gdt at virtual address %p\n", (void *)gKernelArgs.arch_args.vir_gdt));
	}
#endif

	// save the memory we've physically allocated
	gKernelArgs.physical_allocated_range[0].size = sNextPhysicalAddress
		- gKernelArgs.physical_allocated_range[0].start;

	// save the memory we've virtually allocated (for the kernel and other stuff)
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_LOAD_BASE;
	gKernelArgs.virtual_allocated_range[0].size
		= sNextVirtualAddress - KERNEL_LOAD_BASE;
	gKernelArgs.num_virtual_allocated_ranges = 1;

	// sort the address ranges
	sort_address_ranges(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
	sort_address_ranges(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

#ifdef TRACE_MMU
	{
		uint32 i;

		dprintf("phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			dprintf("    base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				gKernelArgs.physical_memory_range[i].start,
				gKernelArgs.physical_memory_range[i].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			dprintf("    base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				gKernelArgs.physical_allocated_range[i].start,
				gKernelArgs.physical_allocated_range[i].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			dprintf("    base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
				gKernelArgs.virtual_allocated_range[i].start,
				gKernelArgs.virtual_allocated_range[i].size);
		}
	}
#endif
}


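/**	Sets up the boot loader's MMU environment: selects the MMU ops matching
 *	the detected MMU, enables transparent translation of ST RAM and of the
 *	shadowed I/O range, builds the initial page tables, maps a kernel stack
 *	and records the machine's memory ranges in the kernel args.
 */
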
extern "C" void
mmu_init(void)
{
	TRACE(("mmu_init\n"));
	switch (gKernelArgs.arch_args.mmu_type) {
#if 0
		case 68851:
			gMMUOps = &k851MMUOps;
			break;
#endif
		case 68030:
			gMMUOps = &k030MMUOps;
			break;
		case 68040:
			gMMUOps = &k040MMUOps;
			break;
#if 0
		case 68060:
			gMMUOps = &k060MMUOps;
			break;
#endif
		default:
			panic("unknown mmu type %d\n", gKernelArgs.arch_args.mmu_type);
	}

	gMMUOps->initialize();

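	// TOS keeps the top of FastRAM (TT/Alt-RAM) in the ramtop variable,
	// which is only valid when ramvalid holds its magic value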
	addr_t fastram_top = 0;
	if (*TOSVARramvalid == TOSVARramvalid_MAGIC)
		fastram_top = *TOSVARramtop;
	if (fastram_top) {
		// we have some fastram, use it first
		sNextPhysicalAddress = ATARI_FASTRAM_BASE;
	}

	gKernelArgs.physical_allocated_range[0].start = sNextPhysicalAddress;
	gKernelArgs.physical_allocated_range[0].size = 0;
	gKernelArgs.num_physical_allocated_ranges = 1;
		// remember the start of the allocated physical pages

	TRACE(("mmu_init: enabling transparent translation\n"));
	// enable transparent translation of the first 256 MB
	gMMUOps->set_tt(0, ATARI_CHIPRAM_BASE, 0x10000000, 0);
	// enable transparent translation of the 16 MB ST shadow range for I/O
	gMMUOps->set_tt(1, ATARI_SHADOW_BASE, 0x01000000, 0);

	TRACE(("mmu_init: init rtdir\n"));
	init_page_directory();
#if 0//XXX:HOLE

	// Map the page directory into kernel space at 0xffc00000-0xffffffff
	// this enables a mmu trick where the 4 MB region that this pgdir entry
	// represents now maps the 4MB of potential pagetables that the pgdir
	// points to. Thrown away later in VM bringup, but useful for now.
	gPageRoot[1023] = (uint32)gPageRoot | kDefaultPageFlags;
#endif

	// also map it on the next vpage
	gKernelArgs.arch_args.vir_pgroot = get_next_virtual_page();
	map_page(gKernelArgs.arch_args.vir_pgroot, (uint32)gPageRoot,
		kDefaultPageFlags);

	// set virtual addr for interrupt vector table
	gKernelArgs.arch_args.vir_vbr = gKernelArgs.arch_args.vir_pgroot
		+ VBR_PAGE_OFFSET;

	// map in a kernel stack
	gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL,
		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;

	TRACE(("kernel stack at 0x%lx to 0x%lx\n", gKernelArgs.cpu_kstack[0].start,
		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));

	// ST RAM as 1st range
	gKernelArgs.physical_memory_range[0].start = ATARI_CHIPRAM_BASE;
	gKernelArgs.physical_memory_range[0].size
		= *TOSVARphystop - ATARI_CHIPRAM_BASE;
	gKernelArgs.num_physical_memory_ranges = 1;

	// FastRAM as 2nd range
	if (fastram_top) {
		gKernelArgs.physical_memory_range[1].start = ATARI_FASTRAM_BASE;
		gKernelArgs.physical_memory_range[1].size
			= fastram_top - ATARI_FASTRAM_BASE;
		gKernelArgs.num_physical_memory_ranges++;
	}

	// mark the video area allocated
	addr_t video_base = *TOSVAR_memtop;
	video_base &= ~(B_PAGE_SIZE - 1);
	gKernelArgs.physical_allocated_range[gKernelArgs.num_physical_allocated_ranges].start = video_base;
	gKernelArgs.physical_allocated_range[gKernelArgs.num_physical_allocated_ranges].size = *TOSVARphystop - video_base;
	gKernelArgs.num_physical_allocated_ranges++;

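	// reserve a physical page for the native features (NatFeat) interface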
	gKernelArgs.arch_args.plat_args.atari.nat_feat.nf_page =
		get_next_physical_page() /*| 0xff000000*/;
}


//	#pragma mark -


extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection,
	bool /*exactAddress*/)
{
	void *address = mmu_allocate(*_address, size);
	if (address == NULL)
		return B_NO_MEMORY;

	*_address = address;
	return B_OK;
}


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	mmu_free(address, size);
	return B_OK;
}


void
platform_release_heap(struct stage2_args *args, void *base)
{
	// It will be freed automatically, since it is in the
	// identity mapped region, and not stored in the kernel's
	// page tables.
}


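// The heap is carved out of identity mapped physical memory, so no virtual
// mapping is set up here and platform_release_heap() has nothing to undo.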
status_t
platform_init_heap(struct stage2_args *args, void **_base, void **_top)
{
	void *heap = (void *)get_next_physical_address(args->heap_size);
	if (heap == NULL)
		return B_NO_MEMORY;

	*_base = heap;
	*_top = (void *)((int8 *)heap + args->heap_size);
	return B_OK;
}


extern "C" status_t
platform_bootloader_address_to_kernel_address(void *address, addr_t *_result)
{
	TRACE(("%s: called\n", __func__));
	// atari_m68k really doesn't need an address conversion
	*_result = (addr_t)address;
	return B_OK;
}


extern "C" status_t
platform_kernel_address_to_bootloader_address(addr_t address, void **_result)
{
	TRACE(("%s: called\n", __func__));
	// atari_m68k really doesn't need an address conversion
	*_result = (void*)address;
	return B_OK;
}