1/*
2 * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
3 * Distributed under the terms of the MIT License.
4 *
5 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
6 * Distributed under the terms of the NewOS License.
7 */
8
9#include <KernelExport.h>
10
11#include <kernel.h>
12#include <boot/kernel_args.h>
13
14#include <vm/vm.h>
15#include <vm/VMAddressSpace.h>
16#include <arch/vm.h>
17#include <arch_mmu.h>
18
19
20//#define TRACE_ARCH_VM
21#ifdef TRACE_ARCH_VM
22#	define TRACE(x) dprintf x
23#else
24#	define TRACE(x) ;
25#endif
26
27
status_t
arch_vm_init(kernel_args *args)
{
	// First-stage architecture VM init. Nothing is needed at this point on
	// this platform; the MMU state set up by the boot loader is still in
	// effect (BAT cleanup happens later in arch_vm_init2()).
	return B_OK;
}
33
34
35status_t
36arch_vm_init2(kernel_args *args)
37{
38//	int bats[8];
39//	int i;
40
41#if 0
42	// print out any bat mappings
43	getibats(bats);
44	dprintf("ibats:\n");
45	for(i = 0; i < 4; i++)
46		dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
47	getdbats(bats);
48	dprintf("dbats:\n");
49	for(i = 0; i < 4; i++)
50		dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
51#endif
52
53#if 1
54	// turn off the first 2 BAT mappings (3 & 4 are used by the lower level code)
55	block_address_translation bat;
56	bat.Clear();
57
58	set_ibat0(&bat);
59	set_ibat1(&bat);
60	set_dbat0(&bat);
61	set_dbat1(&bat);
62/*	getibats(bats);
63	memset(bats, 0, 2 * 2);
64	setibats(bats);
65	getdbats(bats);
66	memset(bats, 0, 2 * 2);
67	setdbats(bats);
68*/
69#endif
70#if 0
71	// just clear the first BAT mapping (0 - 256MB)
72	dprintf("msr 0x%x\n", getmsr());
73	{
74		unsigned int reg;
75		asm("mr	%0,1" : "=r"(reg));
76		dprintf("sp 0x%x\n", reg);
77	}
78	dprintf("ka %p\n", ka);
79
80	getibats(bats);
81	dprintf("ibats:\n");
82	for(i = 0; i < 4; i++)
83		dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
84	bats[0] = bats[1] = 0;
85	setibats(bats);
86	getdbats(bats);
87	dprintf("dbats:\n");
88	for(i = 0; i < 4; i++)
89		dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
90	bats[0] = bats[1] = 0;
91	setdbats(bats);
92#endif
93	return B_OK;
94}
95
96
status_t
arch_vm_init_post_area(kernel_args *args)
{
	// Called once the area subsystem is up; no per-architecture areas need
	// to be created on this platform.
	return B_OK;
}
102
103
104status_t
105arch_vm_init_end(kernel_args *args)
106{
107	TRACE(("arch_vm_init_end(): %lu virtual ranges to keep:\n",
108		args->arch_args.num_virtual_ranges_to_keep));
109
110	for (int i = 0; i < (int)args->arch_args.num_virtual_ranges_to_keep; i++) {
111		addr_range &range = args->arch_args.virtual_ranges_to_keep[i];
112
113		TRACE(("  start: %p, size: 0x%lx\n", (void*)range.start, range.size));
114
115		// skip ranges outside the kernel address space
116		if (!IS_KERNEL_ADDRESS(range.start)) {
117			TRACE(("    no kernel address, skipping...\n"));
118			continue;
119		}
120
121		phys_addr_t physicalAddress;
122		void *address = (void*)range.start;
123		if (vm_get_page_mapping(VMAddressSpace::KernelID(), range.start,
124				&physicalAddress) != B_OK)
125			panic("arch_vm_init_end(): No page mapping for %p\n", address);
126		area_id area = vm_map_physical_memory(VMAddressSpace::KernelID(),
127			"boot loader reserved area", &address,
128			B_EXACT_ADDRESS, range.size,
129			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
130			physicalAddress, true);
131		if (area < 0) {
132			panic("arch_vm_init_end(): Failed to create area for boot loader "
133				"reserved area: %p - %p\n", (void*)range.start,
134				(void*)(range.start + range.size));
135		}
136	}
137
138	// Throw away any address space mappings we've inherited from the boot
139	// loader and have not yet turned into an area.
140	vm_free_unused_boot_loader_range(0, 0xffffffff - B_PAGE_SIZE + 1);
141
142	return B_OK;
143}
144
145
status_t
arch_vm_init_post_modules(kernel_args *args)
{
	// Called after kernel modules are available; no architecture-specific
	// work is required here on this platform.
	return B_OK;
}
151
152
void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
	// Nothing to do on an address space switch here; the actual translation
	// context change is presumably handled by the lower-level paging code —
	// TODO(review): confirm against arch_thread/paging implementation.
}
157
158
bool
arch_vm_supports_protection(uint32 protection)
{
	// All protection combinations are accepted; no per-architecture
	// restrictions are enforced here.
	return true;
}
164
165
void
arch_vm_unset_memory_type(VMArea *area)
{
	// No-op: memory types are not applied on this architecture (see
	// arch_vm_set_memory_type()), so there is nothing to undo.
}
170
171
172status_t
173arch_vm_set_memory_type(VMArea *area, phys_addr_t physicalBase, uint32 type)
174{
175	if (type == 0)
176		return B_OK;
177
178	return B_OK;
179}
180