/*
 * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <arch/vm_translation_map.h>

#include <new>

#include <boot/kernel_args.h>

#include "paging/32bit/ARMPagingMethod32Bit.h"
//#include "paging/pae/ARMPagingMethodPAE.h"


//#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


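// Statically allocated storage for the active paging method object. It is
// constructed into this buffer via placement new in
// arch_vm_translation_map_init(), which runs during early boot, before the
// kernel heap is available. The uint64 member forces suitable alignment; the
// char arrays reserve room for the largest supported method.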
static union {
	uint64	align;
	char	thirty_two[sizeof(ARMPagingMethod32Bit)];
	#if B_HAIKU_PHYSICAL_BITS == 64
		char	pae[sizeof(ARMPagingMethodPAE)];
	#endif
} sPagingMethodBuffer;


// #pragma mark - VM API


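/*!	Creates a VMTranslationMap by delegating to the active paging method.
	\a kernel specifies whether the map is for the kernel itself or for a
	userland team.
*/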
status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	return gARMPagingMethod->CreateTranslationMap(kernel, _map);
}


status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("vm_translation_map_init: entry\n");

	#ifdef TRACE_VM_TMAP
	TRACE("physical memory ranges:\n");
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		phys_addr_t start = args->physical_memory_range[i].start;
		phys_addr_t end = start + args->physical_memory_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated physical ranges:\n");
	for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
		phys_addr_t start = args->physical_allocated_range[i].start;
		phys_addr_t end = start + args->physical_allocated_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated virtual ranges:\n");
	for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) {
		addr_t start = args->virtual_allocated_range[i].start;
		addr_t end = start + args->virtual_allocated_range[i].size;
		TRACE("  %#10" B_PRIxADDR " - %#10" B_PRIxADDR "\n", start, end);
	}
	#endif
	// TODO: Check for LPAE and the long-descriptor page table format; for now
	// only the short-descriptor format is implemented.
	gARMPagingMethod = new(&sPagingMethodBuffer) ARMPagingMethod32Bit;

	return gARMPagingMethod->Init(args, _physicalPageMapper);
}


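/*!	Called during boot once semaphores are available. Nothing to do on ARM.
*/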
status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
	return B_OK;
}


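/*!	Called during boot once the area subsystem is available. Gives the paging
	method a chance to create areas for the structures it set up earlier in
	the boot process.
*/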
status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	TRACE("vm_translation_map_init_post_area: entry\n");

	return gARMPagingMethod->InitPostArea(args);
}


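/*!	Maps a page during early boot, before the VM subsystem is fully
	initialized. \a get_free_page is called to allocate physical pages for any
	page tables that need to be created.
*/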
status_t
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
	uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
{
	TRACE("early_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n",
		pa, va);

	return gARMPagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
}


/*!	Verifies that the page at the given virtual address can be accessed in the
	current context.

	This function is invoked in the kernel debugger. Paranoid checking is in
	order.

	\param virtualAddress The virtual address to be checked.
	\param protection The area protection for which to check. Valid values are
		bitwise combinations of \c B_KERNEL_READ_AREA and
		\c B_KERNEL_WRITE_AREA.
	\return \c true, if the address can be accessed in all ways specified by
		\a protection, \c false otherwise.
*/
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
	if (!gARMPagingMethod)
		return true;

	return gARMPagingMethod->IsKernelPageAccessible(virtualAddress, protection);
}

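// A minimal usage sketch (hypothetical caller, not part of this file): before
// touching a page from the kernel debugger, one could require both read and
// write access:
//
//	if (arch_vm_translation_map_is_kernel_page_accessible(address,
//			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA)) {
//		// the page may be both read and written in the current context
//	}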