/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*	(bonefish) Some explanatory words on how address translation is implemented
	for the 32 bit PPC architecture.

	I use the address type nomenclature as used in the PPC architecture
	specs, i.e.
	- effective address: An address as used by program instructions, i.e.
	  that's what elsewhere (e.g. in the VM implementation) is called a
	  virtual address.
	- virtual address: An intermediate address computed from the effective
	  address via the segment registers.
	- physical address: An address referring to physical storage.

	The hardware translates an effective address to a physical address using
	either of two mechanisms: 1) Block Address Translation (BAT) or
	2) segment + page translation. The first mechanism does this directly
	using two sets (for data/instructions) of special purpose registers.
	The latter mechanism is of more relevance here, though:

	effective address (32 bit):  [ 0 ESID  3 | 4  PIX 19 | 20 Byte 31 ]
	                                   |           |           |
	                          (segment registers)  |           |
	                                   |           |           |
	virtual address (52 bit):    [ 0      VSID 23 | 24 PIX 39 | 40 Byte 51 ]
	                             [ 0             VPN       39 | 40 Byte 51 ]
	                                           |                    |
	                                      (page table)              |
	                                           |                    |
	physical address (32 bit):   [ 0        PPN       19 | 20 Byte 31 ]


	ESID: Effective Segment ID
	VSID: Virtual Segment ID
	PIX:  Page Index
	VPN:  Virtual Page Number
	PPN:  Physical Page Number

	Unlike on x86 we can't switch the context to another team simply by
	pointing a register at another page directory, since we only have one
	page table containing both kernel and user address mappings. Instead we
	map the effective address spaces of the kernel and of *all* teams into
	non-overlapping ranges of the virtual address space (which fortunately is
	20 bits wider), and use the segment registers to select the section of
	the virtual address space for the current team. Half of the 16 segment
	registers (8-15) map the kernel addresses, so they remain unchanged.

	The range of the virtual address space a team's effective address space
	is mapped to is defined by its PPCVMTranslationMap::fVSIDBase,
	which is the first of the 8 successive VSID values used for the team.

	Which fVSIDBase values are already taken is defined by the set bits in
	the bitmap sVSIDBaseBitmap.

	TODO:
	* If we want to continue to use the OF services, we would need to add
	  their address mappings to the kernel space. Unfortunately some stuff
	  (especially RAM) is mapped in an address range outside the kernel
	  address space. We probably need to map those into each team's address
	  space as kernel read/write areas.
	* The current locking scheme is insufficient. The page table is a resource
	  shared by all teams. We need to synchronize access to it. Probably via a
	  spinlock.
*/
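
/*	Illustrative example (the addresses and the fVSIDBase value are made up):
	assume a team whose translation map got fVSIDBase == 16. The user
	effective address 0x30001234 lies in segment 3 (ESID == 3), so
	VADDR_TO_VSID(16, 0x30001234) yields VSID 16 + 3 == 19. The 16 bit page
	index is (0x30001234 >> 12) & 0xffff == 0x0001 and the byte offset is
	0x234. VSID and page index are what page_table_entry::PrimaryHash()
	combines to pick a page table entry group; the physical page number found
	there, multiplied by B_PAGE_SIZE and added to the byte offset, gives the
	physical address. Kernel addresses work the same way with the kernel
	map's fVSIDBase of 0 (see Init()), e.g. 0x80001234 has ESID 8 and thus
	VSID 8.
*/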

#include <arch/vm_translation_map.h>

#include <stdlib.h>

#include <KernelExport.h>

#include <arch/cpu.h>
#include <arch_mmu.h>
#include <boot/kernel_args.h>
#include <int.h>
#include <kernel.h>
#include <slab/Slab.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include <util/AutoLock.h>

#include "generic_vm_physical_page_mapper.h"
#include "generic_vm_physical_page_ops.h"
#include "GenericVMPhysicalPageMapper.h"


static struct page_table_entry_group *sPageTable;
static size_t sPageTableSize;
static uint32 sPageTableHashMask;
static area_id sPageTableArea;

// 64 MB of iospace
#define IOSPACE_SIZE (64*1024*1024)
// We only have small (4 KB) pages. The only reason for choosing a larger chunk
// size is to limit the wasted memory, since the generic page mapper allocates
// structures per physical/virtual chunk.
// TODO: Implement a page mapper more suitable for small pages!
#define IOSPACE_CHUNK_SIZE (16 * B_PAGE_SIZE)

static addr_t sIOSpaceBase;

static GenericVMPhysicalPageMapper sPhysicalPageMapper;

// The VSID is a 24 bit number. The lower three bits are defined by the
// (effective) segment number, which leaves us with a 21 bit space of
// VSID bases (= 2 * 1024 * 1024). Of these, only MAX_VSID_BASES are actually
// handed out and tracked in sVSIDBaseBitmap.
#define MAX_VSID_BASES (PAGE_SIZE * 8)
static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
static spinlock sVSIDBaseBitmapLock;

#define VSID_BASE_SHIFT 3
#define VADDR_TO_VSID(vsidBase, vaddr) (vsidBase + ((vaddr) >> 28))
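
// A VSID base is always a multiple of 8 (one VSID per segment register). Its
// slot in sVSIDBaseBitmap is therefore
//   baseBit = fVSIDBase >> VSID_BASE_SHIFT,
// stored in word baseBit / 32, bit baseBit % 32 (cf. Init() and the
// PPCVMTranslationMap destructor below).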

struct PPCVMTranslationMap : VMTranslationMap {
								PPCVMTranslationMap();
	virtual						~PPCVMTranslationMap();

			status_t			Init(bool kernel);

	inline	int					VSIDBase() const	{ return fVSIDBase; }

			page_table_entry*	LookupPageTableEntry(addr_t virtualAddress);
			bool				RemovePageTableEntry(addr_t virtualAddress);

	virtual	bool				Lock();
	virtual	void				Unlock();

	virtual	addr_t				MappedSize() const;
	virtual	size_t				MaxPagesNeededToMap(addr_t start,
									addr_t end) const;

	virtual	status_t			Map(addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									vm_page_reservation* reservation);
	virtual	status_t			Unmap(addr_t start, addr_t end);

	virtual	status_t			UnmapPage(VMArea* area, addr_t address,
									bool updatePageQueue);

	virtual	status_t			Query(addr_t virtualAddress,
									phys_addr_t* _physicalAddress,
									uint32* _flags);
	virtual	status_t			QueryInterrupt(addr_t virtualAddress,
									phys_addr_t* _physicalAddress,
									uint32* _flags);

	virtual	status_t			Protect(addr_t base, addr_t top,
									uint32 attributes, uint32 memoryType);
	virtual	status_t			ClearFlags(addr_t virtualAddress,
									uint32 flags);

	virtual	bool				ClearAccessedAndModified(
									VMArea* area, addr_t address,
									bool unmapIfUnaccessed,
									bool& _modified);

	virtual	void				Flush();

protected:
			int					fVSIDBase;
};


void
ppc_translation_map_change_asid(VMTranslationMap *map)
{
// this code depends on the kernel being at 0x80000000, fix if we change that
#if KERNEL_BASE != 0x80000000
#error fix me
#endif
	int vsidBase = static_cast<PPCVMTranslationMap*>(map)->VSIDBase();

	isync();	// synchronize context
	// "r" constraint: mtsr takes its source operand from a general purpose
	// register.
	asm("mtsr	0,%0" : : "r"(vsidBase));
	asm("mtsr	1,%0" : : "r"(vsidBase + 1));
	asm("mtsr	2,%0" : : "r"(vsidBase + 2));
	asm("mtsr	3,%0" : : "r"(vsidBase + 3));
	asm("mtsr	4,%0" : : "r"(vsidBase + 4));
	asm("mtsr	5,%0" : : "r"(vsidBase + 5));
	asm("mtsr	6,%0" : : "r"(vsidBase + 6));
	asm("mtsr	7,%0" : : "r"(vsidBase + 7));
	isync();	// synchronize context
}
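
// Only segment registers 0-7 (the user half of the effective address space)
// are rewritten on a team switch; registers 8-15 map the kernel half and keep
// the values set up by the boot loader (see the explanatory comment at the
// top of this file and Init()).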


static void
fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
	addr_t virtualAddress, phys_addr_t physicalAddress, uint8 protection,
	uint32 memoryType, bool secondaryHash)
{
	// lower 32 bit - set at once
	entry->physical_page_number = physicalAddress / B_PAGE_SIZE;
	entry->_reserved0 = 0;
	entry->referenced = false;
	entry->changed = false;
	entry->write_through = (memoryType == B_MTR_UC) || (memoryType == B_MTR_WT);
	entry->caching_inhibited = (memoryType == B_MTR_UC);
	entry->memory_coherent = false;
	entry->guarded = false;
	entry->_reserved1 = 0;
	entry->page_protection = protection & 0x3;
	eieio();
		// We need to make sure that the lower 32 bits have already been
		// written when the entry becomes valid.

	// upper 32 bit
	entry->virtual_segment_id = virtualSegmentID;
	entry->secondary_hash = secondaryHash;
	entry->abbr_page_index = (virtualAddress >> 22) & 0x3f;
	entry->valid = true;

	ppc_sync();
}


page_table_entry *
PPCVMTranslationMap::LookupPageTableEntry(addr_t virtualAddress)
{
	// lookup the vsid based off the va
	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);

//	dprintf("vm_translation_map.lookup_page_table_entry: vsid %ld, va 0x%lx\n", virtualSegmentID, virtualAddress);

	// Search for the page table entry using the primary hash value

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == false
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			return entry;
	}

	// didn't find it, try the secondary hash value

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->virtual_segment_id == virtualSegmentID
			&& entry->secondary_hash == true
			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
			return entry;
	}

	return NULL;
}
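
// Summary of the lookup above: each hash value selects a page table entry
// group (a "PTEG" in the architecture books) of eight entries. An entry
// created via the primary hash has secondary_hash == false, one created via
// the secondary hash has secondary_hash == true, so both the group and that
// flag have to match.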


bool
PPCVMTranslationMap::RemovePageTableEntry(addr_t virtualAddress)
{
	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
	if (entry == NULL)
		return false;

	entry->valid = 0;
	ppc_sync();
	tlbie(virtualAddress);
	eieio();
	tlbsync();
	ppc_sync();

	return true;
}
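
// The sequence above is the usual PowerPC recipe for tearing down a mapping:
// clear the valid bit and make that store visible (ppc_sync()), invalidate
// the TLB entry (tlbie), order the invalidation (eieio), wait until it has
// taken effect on all processors (tlbsync), and synchronize again
// (ppc_sync()).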


static status_t
map_iospace_chunk(addr_t va, phys_addr_t pa, uint32 flags)
{
	pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
	va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
	if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);

	// map the pages
	return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
}


// #pragma mark -


PPCVMTranslationMap::PPCVMTranslationMap()
{
}


PPCVMTranslationMap::~PPCVMTranslationMap()
{
	if (fMapCount > 0) {
		panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
			this, fMapCount);
	}

	// mark the vsid base not in use
	int baseBit = fVSIDBase >> VSID_BASE_SHIFT;
	atomic_and((vint32 *)&sVSIDBaseBitmap[baseBit / 32],
			~(1 << (baseBit % 32)));
}


status_t
PPCVMTranslationMap::Init(bool kernel)
{
	cpu_status state = disable_interrupts();
	acquire_spinlock(&sVSIDBaseBitmapLock);

	// allocate a VSID base for this one
	if (kernel) {
		// The boot loader has set up the segment registers for identical
		// mapping. Two VSID bases are reserved for the kernel: 0 and 8. The
		// latter one for mapping the kernel address space (0x80000000...), the
		// former one for the lower addresses required by the Open Firmware
		// services.
		fVSIDBase = 0;
		sVSIDBaseBitmap[0] |= 0x3;
	} else {
		int i = 0;

		while (i < MAX_VSID_BASES) {
			if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
				i += 32;
				continue;
			}
			if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
				// we found it
				sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
				break;
			}
			i++;
		}
		if (i >= MAX_VSID_BASES)
			panic("vm_translation_map_create: out of VSID bases\n");
		fVSIDBase = i << VSID_BASE_SHIFT;
	}

	release_spinlock(&sVSIDBaseBitmapLock);
	restore_interrupts(state);

	return B_OK;
}


bool
PPCVMTranslationMap::Lock()
{
	recursive_lock_lock(&fLock);
	return true;
}


void
PPCVMTranslationMap::Unlock()
{
	recursive_lock_unlock(&fLock);
}


size_t
PPCVMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	return 0;
}
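
// Note: the page table is a single, preallocated hash table handed over by
// the boot loader (see arch_vm_translation_map_init()), so mapping a page
// never requires allocating additional paging structures. That is why 0 is
// returned above and why ppc_map_address_range() reserves 0 pages.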


status_t
PPCVMTranslationMap::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
{
	// lookup the vsid based off the va
	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);
	uint32 protection = 0;

	// ToDo: check this
	// all kernel mappings are R/W to supervisor code
	if (attributes & (B_READ_AREA | B_WRITE_AREA))
		protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY;

	//dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va);

	// Search for a free page table slot using the primary hash value

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->valid)
			continue;

		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
			protection, memoryType, false);
		fMapCount++;
		return B_OK;
	}

	// Didn't find one, try the secondary hash value

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];

		if (entry->valid)
			continue;

		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
			protection, memoryType, true);
			// Mark the entry as hashed with the secondary hash, so that
			// LookupPageTableEntry() (and the hardware) will find it in
			// this group.
		fMapCount++;
		return B_OK;
	}

	panic("vm_translation_map.map_tmap: hash table full\n");
	return B_ERROR;
}


status_t
PPCVMTranslationMap::Unmap(addr_t start, addr_t end)
{
	page_table_entry *entry;

	start = ROUNDDOWN(start, B_PAGE_SIZE);
	end = ROUNDUP(end, B_PAGE_SIZE);

//	dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end);

	while (start < end) {
		if (RemovePageTableEntry(start))
			fMapCount--;

		start += B_PAGE_SIZE;
	}

	return B_OK;
}


status_t
PPCVMTranslationMap::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	RecursiveLocker locker(fLock);

	if (area->cache_type == CACHE_TYPE_DEVICE) {
		if (!RemovePageTableEntry(address))
			return B_ENTRY_NOT_FOUND;

		fMapCount--;
		return B_OK;
	}

	page_table_entry* entry = LookupPageTableEntry(address);
	if (entry == NULL)
		return B_ENTRY_NOT_FOUND;

	page_num_t pageNumber = entry->physical_page_number;
	bool accessed = entry->referenced;
	bool modified = entry->changed;

	RemovePageTableEntry(address);

	fMapCount--;

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, pageNumber, accessed, modified, updatePageQueue);

	return B_OK;
}


status_t
PPCVMTranslationMap::Query(addr_t va, phys_addr_t *_outPhysical,
	uint32 *_outFlags)
{
	page_table_entry *entry;

	// default the flags to not present
	*_outFlags = 0;
	*_outPhysical = 0;

	entry = LookupPageTableEntry(va);
	if (entry == NULL)
		return B_NO_ERROR;

	// ToDo: check this!
	if (IS_KERNEL_ADDRESS(va)) {
		*_outFlags |= B_KERNEL_READ_AREA
			| (entry->page_protection == PTE_READ_ONLY
				? 0 : B_KERNEL_WRITE_AREA);
	} else {
		*_outFlags |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA
			| (entry->page_protection == PTE_READ_ONLY ? 0 : B_WRITE_AREA);
	}

	*_outFlags |= entry->changed ? PAGE_MODIFIED : 0;
	*_outFlags |= entry->referenced ? PAGE_ACCESSED : 0;
	*_outFlags |= entry->valid ? PAGE_PRESENT : 0;

	*_outPhysical = entry->physical_page_number * B_PAGE_SIZE;

	return B_OK;
}


status_t
PPCVMTranslationMap::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	return PPCVMTranslationMap::Query(virtualAddress, _physicalAddress, _flags);
}


addr_t
PPCVMTranslationMap::MappedSize() const
{
	return fMapCount;
}


status_t
PPCVMTranslationMap::Protect(addr_t base, addr_t top, uint32 attributes,
	uint32 memoryType)
{
	// XXX finish
	return B_ERROR;
}


status_t
PPCVMTranslationMap::ClearFlags(addr_t virtualAddress, uint32 flags)
{
	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
	if (entry == NULL)
		return B_NO_ERROR;

	bool modified = false;

	// clear the bits
	if ((flags & PAGE_MODIFIED) != 0 && entry->changed) {
		entry->changed = false;
		modified = true;
	}
	if ((flags & PAGE_ACCESSED) != 0 && entry->referenced) {
		entry->referenced = false;
		modified = true;
	}

	// synchronize
	if (modified) {
		tlbie(virtualAddress);
		eieio();
		tlbsync();
		ppc_sync();
	}

	return B_OK;
}


bool
PPCVMTranslationMap::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	// TODO: Implement for real! ATM this is just an approximation using
	// Query(), ClearFlags(), and UnmapPage(). See below!

	RecursiveLocker locker(fLock);

	uint32 flags;
	phys_addr_t physicalAddress;
	if (Query(address, &physicalAddress, &flags) != B_OK
		|| (flags & PAGE_PRESENT) == 0) {
		return false;
	}

	_modified = (flags & PAGE_MODIFIED) != 0;

	if ((flags & (PAGE_ACCESSED | PAGE_MODIFIED)) != 0)
		ClearFlags(address, flags & (PAGE_ACCESSED | PAGE_MODIFIED));

	if ((flags & PAGE_ACCESSED) != 0)
		return true;

	if (!unmapIfUnaccessed)
		return false;

	locker.Unlock();

	UnmapPage(area, address, false);
		// TODO: Obvious race condition: Between querying and unmapping the
		// page could have been accessed. We try to compensate by considering
		// vm_page::{accessed,modified} (which would have been updated by
		// UnmapPage()) below, but that doesn't quite match the required
		// semantics of the method.

	vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
	if (page == NULL)
		return false;

	_modified |= page->modified;

	return page->accessed;
}


void
PPCVMTranslationMap::Flush()
{
// TODO: arch_cpu_global_TLB_invalidate() is extremely expensive and doesn't
// even cut it here. We are supposed to invalidate all TLB entries for this
// map on all CPUs. We should loop over the virtual pages and invoke tlbie
// instead (which marks the entry invalid on all CPUs).
	arch_cpu_global_TLB_invalidate();
}
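
// A rough sketch of what the TODO above has in mind (untested; the range of
// virtual pages to invalidate would still have to be tracked by this map):
//
//	for (addr_t va = start; va < end; va += B_PAGE_SIZE)
//		tlbie(va);
//	eieio();
//	tlbsync();
//	ppc_sync();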


static status_t
get_physical_page_tmap(phys_addr_t physicalAddress, addr_t *_virtualAddress,
	void **handle)
{
	return generic_get_physical_page(physicalAddress, _virtualAddress, 0);
}


static status_t
put_physical_page_tmap(addr_t virtualAddress, void *handle)
{
	return generic_put_physical_page(virtualAddress);
}


//  #pragma mark -
//  VM API


status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	PPCVMTranslationMap* map = new(std::nothrow) PPCVMTranslationMap;
	if (map == NULL)
		return B_NO_MEMORY;

	status_t error = map->Init(kernel);
	if (error != B_OK) {
		delete map;
		return error;
	}

	*_map = map;
	return B_OK;
}


status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	sPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
	sPageTableSize = args->arch_args.page_table.size;
	sPageTableHashMask = sPageTableSize / sizeof(page_table_entry_group) - 1;

	// init physical page mapper
	status_t error = generic_vm_physical_page_mapper_init(args,
		map_iospace_chunk, &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
	if (error != B_OK)
		return error;

	new(&sPhysicalPageMapper) GenericVMPhysicalPageMapper;

	*_physicalPageMapper = &sPhysicalPageMapper;
	return B_OK;
}


status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	// If the page table doesn't lie within the kernel address space, we
	// remap it.
	if (!IS_KERNEL_ADDRESS(sPageTable)) {
		addr_t newAddress = (addr_t)sPageTable;
		status_t error = ppc_remap_address_range(&newAddress, sPageTableSize,
			false);
		if (error != B_OK) {
			panic("arch_vm_translation_map_init_post_area(): Failed to remap "
				"the page table!");
			return error;
		}

		// set the new page table address
		addr_t oldVirtualBase = (addr_t)(sPageTable);
		sPageTable = (page_table_entry_group*)newAddress;

		// unmap the old pages
		ppc_unmap_address_range(oldVirtualBase, sPageTableSize);

// TODO: We should probably map the page table via BAT. It is relatively large,
// and due to being a hash table the access patterns might look sporadic, which
// certainly isn't to the liking of the TLB.
	}

	// create an area to cover the page table
	sPageTableArea = create_area("page_table", (void **)&sPageTable,
		B_EXACT_ADDRESS, sPageTableSize, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	// init physical page mapper
	status_t error = generic_vm_physical_page_mapper_init_post_area(args);
	if (error != B_OK)
		return error;

	return B_OK;
}


status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
	// init physical page mapper
	return generic_vm_physical_page_mapper_init_post_sem(args);
}


/**	Directly maps a page without having knowledge of any kernel structures.
 *	Used only during VM setup.
 *	It currently ignores the "attributes" parameter and sets all pages
 *	read/write.
 */

status_t
arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress,
	phys_addr_t physicalAddress, uint8 attributes,
	phys_addr_t (*get_free_page)(kernel_args *))
{
	uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		(uint32)virtualAddress);
	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		// 8 entries in a group
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, PTE_READ_WRITE, 0, false);
		return B_OK;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &sPageTable[hash & sPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		if (group->entry[i].valid)
			continue;

		fill_page_table_entry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, PTE_READ_WRITE, 0, true);
		return B_OK;
	}

	return B_ERROR;
}


// XXX currently assumes this translation map is active

status_t
arch_vm_translation_map_early_query(addr_t va, phys_addr_t *out_physical)
{
	//PANIC_UNIMPLEMENTED();
	panic("vm_translation_map_quick_query(): not yet implemented\n");
	return B_OK;
}
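
// A possible implementation (sketch only, mirroring what early_map() and
// LookupPageTableEntry() do): derive the VSID from the segment register for
// `va` via get_sr(), compute the primary and secondary hash, scan the two
// page table entry groups for a valid entry with matching VSID, hash flag,
// and abbreviated page index, and return entry->physical_page_number
// * B_PAGE_SIZE plus the byte offset of `va`.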


// #pragma mark -


status_t
ppc_map_address_range(addr_t virtualAddress, phys_addr_t physicalAddress,
	size_t size)
{
	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);
	physicalAddress = ROUNDDOWN(physicalAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();
	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());

	vm_page_reservation reservation;
	vm_page_reserve_pages(&reservation, 0, VM_PRIORITY_USER);
		// We don't need any pages for mapping.

	// map the pages
	for (; virtualAddress < virtualEnd;
		 virtualAddress += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
		status_t error = map->Map(virtualAddress, physicalAddress,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, &reservation);
		if (error != B_OK)
			return error;
	}

	return B_OK;
}


void
ppc_unmap_address_range(addr_t virtualAddress, size_t size)
{
	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());
	for (; virtualAddress < virtualEnd; virtualAddress += B_PAGE_SIZE)
		map->RemovePageTableEntry(virtualAddress);
}


status_t
ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
{
	addr_t virtualAddress = ROUNDDOWN(*_virtualAddress, B_PAGE_SIZE);
	size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

	// reserve space in the address space
	void *newAddress = NULL;
	status_t error = vm_reserve_address_range(addressSpace->ID(), &newAddress,
		B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (error != B_OK)
		return error;

	// get the area's first physical page
	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());
	page_table_entry *entry = map->LookupPageTableEntry(virtualAddress);
	if (!entry)
		return B_ERROR;
	phys_addr_t physicalBase = (phys_addr_t)entry->physical_page_number << 12;

	// map the pages
	error = ppc_map_address_range((addr_t)newAddress, physicalBase, size);
	if (error != B_OK)
		return error;

	*_virtualAddress = (addr_t)newAddress;

	// unmap the old pages
	if (unmap)
		ppc_unmap_address_range(virtualAddress, size);

	return B_OK;
}


bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
		addressSpace->TranslationMap());

	phys_addr_t physicalAddress;
	uint32 flags;
	if (map->Query(virtualAddress, &physicalAddress, &flags) != B_OK)
		return false;

	if ((flags & PAGE_PRESENT) == 0)
		return false;

	// present means kernel-readable, so check for writable
	return (protection & B_KERNEL_WRITE_AREA) == 0
		|| (flags & B_KERNEL_WRITE_AREA) != 0;
}
