/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*	(bonefish) Some explanatory words on how address translation is implemented
	for the 32 bit PPC architecture.

	I use the address type nomenclature of the PPC architecture specs, i.e.
	- effective address: An address as used by program instructions, i.e.
	  that's what elsewhere (e.g. in the VM implementation) is called a
	  virtual address.
	- virtual address: An intermediate address computed from the effective
	  address via the segment registers.
	- physical address: An address referring to physical storage.

	The hardware translates an effective address to a physical address using
	either of two mechanisms: 1) Block Address Translation (BAT) or
	2) segment + page translation. The first mechanism does this directly
	using two sets (for data/instructions) of special purpose registers.
	The latter mechanism is of more relevance here, though:

	effective address (32 bit):	     [ 0 ESID  3 | 4  PIX 19 | 20 Byte 31 ]
								           |           |            |
							     (segment registers)   |            |
									       |           |            |
	virtual address (52 bit):   [ 0      VSID 23 | 24 PIX 39 | 40 Byte 51 ]
	                            [ 0             VPN       39 | 40 Byte 51 ]
								                 |                  |
										   (page table)             |
											     |                  |
	physical address (32 bit):       [ 0        PPN       19 | 20 Byte 31 ]


	ESID: Effective Segment ID
	VSID: Virtual Segment ID
	PIX:  Page Index
	VPN:  Virtual Page Number
	PPN:  Physical Page Number

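	For illustration, extracting the fields of a 32 bit effective address
	in C would look like this (a sketch only, not code the kernel uses):

		uint32 esid = effectiveAddress >> 28;					// bits 0-3
		uint32 pageIndex = (effectiveAddress >> 12) & 0xffff;	// bits 4-19
		uint32 byteOffset = effectiveAddress & 0xfff;			// bits 20-31

	The segment register selected by the ESID supplies the 24 bit VSID,
	which replaces the ESID to form the 52 bit virtual address.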

	Unlike on x86, we can't switch the context to another team simply by
	pointing a register at another page directory, since we only have one
	page table containing both kernel and user address mappings. Instead we
	map the effective address spaces of the kernel and *all* teams into
	non-overlapping ranges of the virtual address space (which fortunately
	is 20 bits wider), and use the segment registers to select the part of
	the virtual address space belonging to the current team. Half of the 16
	segment registers (8 - 15) map the kernel addresses, so they remain
	unchanged.

	The range of the virtual address space a team's effective address space
	is mapped to is defined by its PPCVMTranslationMap::fVSIDBase,
	which is the first of the 8 successive VSID values used for the team.

	Which fVSIDBase values are already taken is tracked by the set bits in
	the bitmap sVSIDBaseBitmap.
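
	For illustration, the VSID used for a user segment would be computed
	roughly like this (a sketch only; the actual code lives in the paging
	method and PPCVMTranslationMap):

		uint32 vsid = fVSIDBase + (effectiveAddress >> 28);
			// works for user segments 0-7; fVSIDBase is a multiple
			// of 8, so the ESID selects one of the team's 8 VSIDs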


	TODO:
	* If we want to continue to use the OF services, we would need to add
	  their address mappings to the kernel space. Unfortunately some stuff
	  (especially RAM) is mapped in an address range outside the kernel
	  address space. We probably need to map those into each team's address
	  space as kernel read/write areas.
	* The current locking scheme is insufficient. The page table is a resource
	  shared by all teams. We need to synchronize access to it, probably via
	  a spinlock.
 */

#include <arch/vm_translation_map.h>

#include <stdlib.h>

#include <KernelExport.h>

#include <arch/cpu.h>
//#include <arch_mmu.h>
#include <boot/kernel_args.h>
#include <int.h>
#include <kernel.h>
#include <slab/Slab.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include <util/AutoLock.h>

#include "generic_vm_physical_page_mapper.h"
//#include "generic_vm_physical_page_ops.h"
//#include "GenericVMPhysicalPageMapper.h"

#include "paging/PPCVMTranslationMap.h"
#include "paging/classic/PPCPagingMethodClassic.h"
//#include "paging/460/PPCPagingMethod460.h"


#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


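// Statically allocated storage for the paging method object. It is
// constructed via placement new in arch_vm_translation_map_init(), since
// the kernel heap isn't available that early; the union makes the buffer
// large enough and suitably aligned for any of the supported methods.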
static union {
	uint64	align;
	//char	amcc460[sizeof(PPCPagingMethod460)];
	char	classic[sizeof(PPCPagingMethodClassic)];
} sPagingMethodBuffer;


#if 0
struct PPCVMTranslationMap : VMTranslationMap {
								PPCVMTranslationMap();
	virtual						~PPCVMTranslationMap();

			status_t			Init(bool kernel);

	inline	int					VSIDBase() const	{ return fVSIDBase; }

			page_table_entry*	LookupPageTableEntry(addr_t virtualAddress);
			bool				RemovePageTableEntry(addr_t virtualAddress);

	virtual	bool	 			Lock();
	virtual	void				Unlock();

	virtual	addr_t				MappedSize() const;
	virtual	size_t				MaxPagesNeededToMap(addr_t start,
									addr_t end) const;

	virtual	status_t			Map(addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									vm_page_reservation* reservation);
	virtual	status_t			Unmap(addr_t start, addr_t end);

	virtual	status_t			UnmapPage(VMArea* area, addr_t address,
									bool updatePageQueue);

	virtual	status_t			Query(addr_t virtualAddress,
									phys_addr_t* _physicalAddress,
									uint32* _flags);
	virtual	status_t			QueryInterrupt(addr_t virtualAddress,
									phys_addr_t* _physicalAddress,
									uint32* _flags);

	virtual	status_t			Protect(addr_t base, addr_t top,
									uint32 attributes, uint32 memoryType);
	virtual	status_t			ClearFlags(addr_t virtualAddress,
									uint32 flags);

	virtual	bool				ClearAccessedAndModified(
									VMArea* area, addr_t address,
									bool unmapIfUnaccessed,
									bool& _modified);

	virtual	void				Flush();

protected:
			int					fVSIDBase;
};
#endif


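/*!	Called when switching to another team's address space. Delegates to the
	translation map's ChangeASID() implementation, which selects the team's
	VSID range (see the explanatory comment at the top of this file).
*/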
void
ppc_translation_map_change_asid(VMTranslationMap *map)
{
	static_cast<PPCVMTranslationMap*>(map)->ChangeASID();
}


// #pragma mark -


#if 0	// XXX: Not needed anymore?
addr_t
PPCVMTranslationMap::MappedSize() const
{
	return fMapCount;
}


static status_t
get_physical_page_tmap(phys_addr_t physicalAddress, addr_t *_virtualAddress,
	void **handle)
{
	return generic_get_physical_page(physicalAddress, _virtualAddress, 0);
}


static status_t
put_physical_page_tmap(addr_t virtualAddress, void *handle)
{
	return generic_put_physical_page(virtualAddress);
}
#endif


//  #pragma mark -
//  VM API


status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	return gPPCPagingMethod->CreateTranslationMap(kernel, _map);
}


status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("vm_translation_map_init: entry\n");

#ifdef TRACE_VM_TMAP
	TRACE("physical memory ranges:\n");
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		phys_addr_t start = args->physical_memory_range[i].start;
		phys_addr_t end = start + args->physical_memory_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated physical ranges:\n");
	for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
		phys_addr_t start = args->physical_allocated_range[i].start;
		phys_addr_t end = start + args->physical_allocated_range[i].size;
		TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
			end);
	}

	TRACE("allocated virtual ranges:\n");
	for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) {
		addr_t start = args->virtual_allocated_range[i].start;
		addr_t end = start + args->virtual_allocated_range[i].size;
		TRACE("  %#10" B_PRIxADDR " - %#10" B_PRIxADDR "\n", start, end);
	}
#endif

	if (false /* TODO: Check for AMCC460! */) {
		dprintf("using AMCC 460 paging\n");
		panic("XXX");
		//XXX:gPPCPagingMethod = new(&sPagingMethodBuffer) PPCPagingMethod460;
	} else {
		dprintf("using Classic paging\n");
		gPPCPagingMethod = new(&sPagingMethodBuffer) PPCPagingMethodClassic;
	}

	return gPPCPagingMethod->Init(args, _physicalPageMapper);
}


status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	TRACE("vm_translation_map_init_post_area: entry\n");

	return gPPCPagingMethod->InitPostArea(args);
}


status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
	// init physical page mapper
	return generic_vm_physical_page_mapper_init_post_sem(args);
}


/**	Directly maps a page without having knowledge of any kernel structures.
 *	Used only during VM setup.
 *	It currently ignores the "attributes" parameter and sets all pages
 *	read/write.
 */

status_t
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
	uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
{
	TRACE("early_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n", pa,
		va);

	return gPPCPagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
}


// XXX currently assumes this translation map is active

status_t
arch_vm_translation_map_early_query(addr_t va, phys_addr_t *out_physical)
{
	//PANIC_UNIMPLEMENTED();
	panic("vm_translation_map_early_query(): not yet implemented\n");
	return B_OK;
}


// #pragma mark -


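/*!	Maps the given physical address range contiguously at the given virtual
	address in the kernel address space. Addresses and size are rounded to
	page boundaries; all pages are mapped kernel read/write.
*/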
status_t
ppc_map_address_range(addr_t virtualAddress, phys_addr_t physicalAddress,
	size_t size)
{
	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);
	physicalAddress = ROUNDDOWN(physicalAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();
	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());

	vm_page_reservation reservation;
	vm_page_reserve_pages(&reservation, 0, VM_PRIORITY_USER);
		// We don't need any pages for mapping.

	// map the pages
	for (; virtualAddress < virtualEnd;
		 virtualAddress += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
		status_t error = map->Map(virtualAddress, physicalAddress,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, &reservation);
		if (error != B_OK)
			return error;
	}

	return B_OK;
}


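/*!	Unmaps the given address range from the kernel address space, expanding
	the range to page boundaries.
*/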
void
ppc_unmap_address_range(addr_t virtualAddress, size_t size)
{
	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());
	map->Unmap(virtualAddress, virtualEnd);
}


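/*!	Remaps the given kernel address range via
	PPCVMTranslationMap::RemapAddressRange(); the new virtual address is
	returned through _virtualAddress, and the old mappings are removed if
	unmap is true.
*/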
status_t
ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
{
	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());

	return map->RemapAddressRange(_virtualAddress, size, unmap);
}


bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
	// If the paging method hasn't been initialized yet (very early boot),
	// report the page as accessible.
	if (!gPPCPagingMethod)
		return true;

	return gPPCPagingMethod->IsKernelPageAccessible(virtualAddress, protection);
}