/*
 * Copyright 2007, François Revol, revol@free.fr.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include <KernelExport.h>

#include <kernel.h>
#include <boot/kernel_args.h>

#include <vm/vm.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>
#include <arch/vm.h>
#include <arch_mmu.h>


//#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#warning M68K: WRITEME

status_t
arch_vm_init(kernel_args *args)
{
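	// Nothing to initialize at this stage yet (see the WRITEME warning above).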
	return B_OK;
}


status_t
arch_vm_init2(kernel_args *args)
{
//	int bats[8];
//	int i;

	/**/
#warning M68K: disable TT0 and TT1, set up pmmu
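
	// Hypothetical sketch, not from the original code: on a 68040/68060 the
	// transparent translation registers could be cleared with movec once the
	// pmmu page tables are active (a 68030 would use pmove on TT0/TT1
	// instead). Register names and the enable bit are assumptions here, so
	// the block is left disabled.
#if 0
	asm volatile("movec %0,%%itt0" : : "d"(0));	// disable ITT0 (E bit cleared)
	asm volatile("movec %0,%%itt1" : : "d"(0));	// disable ITT1
	asm volatile("movec %0,%%dtt0" : : "d"(0));	// disable DTT0
	asm volatile("movec %0,%%dtt1" : : "d"(0));	// disable DTT1
#endif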

	return B_OK;
}


status_t
arch_vm_init_post_area(kernel_args *args)
{
	return B_OK;
}


status_t
arch_vm_init_end(kernel_args *args)
{
#if 0
	TRACE(("arch_vm_init_end(): %lu virtual ranges to keep:\n",
		args->arch_args.num_virtual_ranges_to_keep));

	for (int i = 0; i < (int)args->arch_args.num_virtual_ranges_to_keep; i++) {
		addr_range &range = args->arch_args.virtual_ranges_to_keep[i];

		TRACE(("  start: %p, size: 0x%lx\n", (void*)range.start, range.size));

		// skip ranges outside the kernel address space
		if (!IS_KERNEL_ADDRESS(range.start)) {
			TRACE(("    no kernel address, skipping...\n"));
			continue;
		}

		void *address = (void*)range.start;
		area_id area = create_area("boot loader reserved area", &address,
			B_EXACT_ADDRESS, range.size, B_ALREADY_WIRED,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		if (area < 0) {
			panic("arch_vm_init_end(): Failed to create area for boot loader "
				"reserved area: %p - %p\n", (void*)range.start,
				(void*)(range.start + range.size));
		}
	}

	// Throw away any address space mappings we've inherited from the boot
	// loader and have not yet turned into an area.
	vm_free_unused_boot_loader_range(0, 0xffffffff - B_PAGE_SIZE + 1);
#endif

#warning M68K: unset TT0 now
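	// A real implementation would disable the boot-time transparent
	// translation here, along the lines of the disabled sketch in
	// arch_vm_init2().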
	return B_OK;
}


status_t
arch_vm_init_post_modules(kernel_args *args)
{
	return B_OK;
}


void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
	// This function is only invoked when a userland thread is in the process
	// of dying. It switches to the kernel team and does whatever cleanup is
	// necessary (in case it is the team's main thread, it will delete the
	// team).
	// It is, however, not necessary to change the page directory. Userland
	// teams' page directories include all kernel mappings as well. Furthermore
	// our arch specific translation map data objects are ref-counted, so they
	// won't go away as long as they are still used on any CPU.
}


bool
arch_vm_supports_protection(uint32 protection)
{
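	// Every B_*_AREA protection combination is reported as supported.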
	return true;
}


void
arch_vm_unset_memory_type(VMArea *area)
{
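	// Nothing to undo: arch_vm_set_memory_type() only ever accepts the
	// default type.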
}


status_t
arch_vm_set_memory_type(VMArea *area, phys_addr_t physicalBase, uint32 type)
{
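	// Only the default memory type is supported; anything else is rejected.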
	if (type == 0)
		return B_OK;

	return B_ERROR;
}