1/*
2 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
4 * Distributed under the terms of the MIT License.
5 *
6 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7 * Distributed under the terms of the NewOS License.
8 */
9
10/*	(bonefish) Some explanatory words on how address translation is implemented
11	for the 32 bit PPC architecture.
12
13	I use the address type nomenclature as used in the PPC architecture
14	specs, i.e.
15	- effective address: An address as used by program instructions, i.e.
16	  that's what elsewhere (e.g. in the VM implementation) is called
17	  virtual address.
18	- virtual address: An intermediate address computed from the effective
19	  address via the segment registers.
20	- physical address: An address referring to physical storage.
21
22	The hardware translates an effective address to a physical address using
23	either of two mechanisms: 1) Block Address Translation (BAT) or
24	2) segment + page translation. The first mechanism does this directly
25	using two sets (for data/instructions) of special purpose registers.
26	The latter mechanism is of more relevance here, though:
27
28	effective address (32 bit):	     [ 0 ESID  3 | 4  PIX 19 | 20 Byte 31 ]
29								           |           |            |
30							     (segment registers)   |            |
31									       |           |            |
32	virtual address (52 bit):   [ 0      VSID 23 | 24 PIX 39 | 40 Byte 51 ]
33	                            [ 0             VPN       39 | 40 Byte 51 ]
34								                 |                  |
35										   (page table)             |
36											     |                  |
37	physical address (32 bit):       [ 0        PPN       19 | 20 Byte 31 ]
38
39
40	ESID: Effective Segment ID
41	VSID: Virtual Segment ID
42	PIX:  Page Index
43	VPN:  Virtual Page Number
44	PPN:  Physical Page Number
45
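	A purely illustrative worked example (the VSID base is made up): take a
	user team with fVSIDBase = 16 and the effective address 0x30018a70. The
	hardware splits it into ESID = 0x3 (bits 0-3), PIX = 0x0018 (bits 4-19)
	and byte offset 0xa70 (bits 20-31). Segment register 3 then supplies
	VSID = fVSIDBase + ESID = 19, so the 52 bit virtual address is
	(VSID << 28) | (PIX << 12) | 0xa70 = 0x130018a70, which the page table
	finally translates into (PPN << 12) | 0xa70.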
46
	Unlike on x86 we can't switch the context to another team simply by
	pointing a register at another page directory, since we only have one
	page table containing both kernel and user address mappings. Instead we
	map the effective address spaces of the kernel and *all* teams
	non-intersectingly into the virtual address space (which fortunately is
	20 bits wider), and use the segment registers to select the section of
	the virtual address space for the current team. The upper half of the 16
	segment registers (8 - 15) maps the kernel addresses, so those remain
	unchanged across context switches.
55
56	The range of the virtual address space a team's effective address space
57	is mapped to is defined by its PPCVMTranslationMap::fVSIDBase,
58	which is the first of the 8 successive VSID values used for the team.
59
60	Which fVSIDBase values are already taken is defined by the set bits in
61	the bitmap sVSIDBaseBitmap.
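	(For example, a team assigned fVSIDBase 0x50 uses the VSIDs 0x50 - 0x57
	for its eight segments and is recorded as bit 10 (= 0x50 >> VSID_BASE_SHIFT)
	in sVSIDBaseBitmap. The value is illustrative only.)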
62
63
64	TODO:
65	* If we want to continue to use the OF services, we would need to add
66	  its address mappings to the kernel space. Unfortunately some stuff
	  (especially RAM) is mapped in an address range outside the kernel
68	  address space. We probably need to map those into each team's address
69	  space as kernel read/write areas.
70	* The current locking scheme is insufficient. The page table is a resource
71	  shared by all teams. We need to synchronize access to it. Probably via a
72	  spinlock.
73 */
74
75#include "paging/classic/PPCVMTranslationMapClassic.h"
76
77#include <stdlib.h>
78#include <string.h>
79
80#include <arch/cpu.h>
81#include <arch_mmu.h>
82#include <int.h>
83#include <thread.h>
84#include <slab/Slab.h>
85#include <smp.h>
86#include <util/AutoLock.h>
87#include <util/ThreadAutoLock.h>
88#include <util/queue.h>
89#include <vm/vm_page.h>
90#include <vm/vm_priv.h>
91#include <vm/VMAddressSpace.h>
92#include <vm/VMCache.h>
93
94#include "paging/classic/PPCPagingMethodClassic.h"
95#include "paging/classic/PPCPagingStructuresClassic.h"
96#include "generic_vm_physical_page_mapper.h"
97#include "generic_vm_physical_page_ops.h"
98#include "GenericVMPhysicalPageMapper.h"
99
100
101//#define TRACE_PPC_VM_TRANSLATION_MAP_CLASSIC
102#ifdef TRACE_PPC_VM_TRANSLATION_MAP_CLASSIC
103#	define TRACE(x...) dprintf(x)
104#else
105#	define TRACE(x...) ;
106#endif
107
108
// The VSID is a 24 bit number. The lower three bits are defined by the
// (effective) segment number, which leaves us with a 21 bit space of
// possible VSID bases (= 2 * 1024 * 1024). The bitmap below only tracks the
// first MAX_VSID_BASES of them (one page worth of bits, i.e. 32768), which
// is plenty, since each live translation map needs just one base.
112#define MAX_VSID_BASES (B_PAGE_SIZE * 8)
113static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
114static spinlock sVSIDBaseBitmapLock;
115
116#define VSID_BASE_SHIFT 3
#define VADDR_TO_VSID(vsidBase, vaddr) ((vsidBase) + ((vaddr) >> 28))
118
119
120// #pragma mark -
121
122
123PPCVMTranslationMapClassic::PPCVMTranslationMapClassic()
124	:
125	fPagingStructures(NULL)
126{
127}
128
129
130PPCVMTranslationMapClassic::~PPCVMTranslationMapClassic()
131{
132	if (fPagingStructures == NULL)
133		return;
134
135#if 0//X86
136	if (fPageMapper != NULL)
137		fPageMapper->Delete();
138#endif
139
140	if (fMapCount > 0) {
141		panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
142			this, fMapCount);
143	}
144
145	// mark the vsid base not in use
146	int baseBit = fVSIDBase >> VSID_BASE_SHIFT;
147	atomic_and((int32 *)&sVSIDBaseBitmap[baseBit / 32],
148			~(1 << (baseBit % 32)));
149
150#if 0//X86
151	if (fPagingStructures->pgdir_virt != NULL) {
152		// cycle through and free all of the user space pgtables
153		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
154				i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
155			if ((fPagingStructures->pgdir_virt[i] & PPC_PDE_PRESENT) != 0) {
156				addr_t address = fPagingStructures->pgdir_virt[i]
157					& PPC_PDE_ADDRESS_MASK;
158				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
159				if (!page)
160					panic("destroy_tmap: didn't find pgtable page\n");
161				DEBUG_PAGE_ACCESS_START(page);
162				vm_page_set_state(page, PAGE_STATE_FREE);
163			}
164		}
165	}
166#endif
167
168	fPagingStructures->RemoveReference();
169}
170
171
172status_t
173PPCVMTranslationMapClassic::Init(bool kernel)
174{
175	TRACE("PPCVMTranslationMapClassic::Init()\n");
176
177	PPCVMTranslationMap::Init(kernel);
178
179	cpu_status state = disable_interrupts();
180	acquire_spinlock(&sVSIDBaseBitmapLock);
181
182	// allocate a VSID base for this one
183	if (kernel) {
184		// The boot loader has set up the segment registers for identical
185		// mapping. Two VSID bases are reserved for the kernel: 0 and 8. The
186		// latter one for mapping the kernel address space (0x80000000...), the
187		// former one for the lower addresses required by the Open Firmware
188		// services.
189		fVSIDBase = 0;
190		sVSIDBaseBitmap[0] |= 0x3;
191	} else {
192		int i = 0;
193
194		while (i < MAX_VSID_BASES) {
195			if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
196				i += 32;
197				continue;
198			}
199			if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
200				// we found it
201				sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
202				break;
203			}
204			i++;
205		}
206		if (i >= MAX_VSID_BASES)
207			panic("vm_translation_map_create: out of VSID bases\n");
208		fVSIDBase = i << VSID_BASE_SHIFT;
209	}
210
211	release_spinlock(&sVSIDBaseBitmapLock);
212	restore_interrupts(state);
213
214	fPagingStructures = new(std::nothrow) PPCPagingStructuresClassic;
215	if (fPagingStructures == NULL)
216		return B_NO_MEMORY;
217
218	PPCPagingMethodClassic* method = PPCPagingMethodClassic::Method();
219
220	if (!kernel) {
221		// user
222#if 0//X86
223		// allocate a physical page mapper
224		status_t error = method->PhysicalPageMapper()
225			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
226		if (error != B_OK)
227			return error;
228#endif
229#if 0//X86
230		// allocate the page directory
231		page_directory_entry* virtualPageDir = (page_directory_entry*)memalign(
232			B_PAGE_SIZE, B_PAGE_SIZE);
233		if (virtualPageDir == NULL)
234			return B_NO_MEMORY;
235
236		// look up the page directory's physical address
237		phys_addr_t physicalPageDir;
238		vm_get_page_mapping(VMAddressSpace::KernelID(),
239			(addr_t)virtualPageDir, &physicalPageDir);
240#endif
241
242		fPagingStructures->Init(/*NULL, 0,
243			method->KernelVirtualPageDirectory()*/method->PageTable());
244	} else {
245		// kernel
246#if 0//X86
247		// get the physical page mapper
248		fPageMapper = method->KernelPhysicalPageMapper();
249#endif
250
251		// we already know the kernel pgdir mapping
252		fPagingStructures->Init(/*method->KernelVirtualPageDirectory(),
253			method->KernelPhysicalPageDirectory(), NULL*/method->PageTable());
254	}
255
256	return B_OK;
257}
258
259
260void
261PPCVMTranslationMapClassic::ChangeASID()
262{
263// this code depends on the kernel being at 0x80000000, fix if we change that
264#if KERNEL_BASE != 0x80000000
265#error fix me
266#endif
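	// Only the user segment registers (0 - 7) are rewritten here; registers
	// 8 - 15 always hold the kernel VSIDs (cf. the explanation at the top of
	// the file) and thus remain untouched on a context switch.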
267	int vsidBase = VSIDBase();
268
269	isync();	// synchronize context
270	asm("mtsr	0,%0" : : "g"(vsidBase));
271	asm("mtsr	1,%0" : : "g"(vsidBase + 1));
272	asm("mtsr	2,%0" : : "g"(vsidBase + 2));
273	asm("mtsr	3,%0" : : "g"(vsidBase + 3));
274	asm("mtsr	4,%0" : : "g"(vsidBase + 4));
275	asm("mtsr	5,%0" : : "g"(vsidBase + 5));
276	asm("mtsr	6,%0" : : "g"(vsidBase + 6));
277	asm("mtsr	7,%0" : : "g"(vsidBase + 7));
278	isync();	// synchronize context
279}
280
281
282page_table_entry *
283PPCVMTranslationMapClassic::LookupPageTableEntry(addr_t virtualAddress)
284{
	// look up the VSID based on the virtual address
286	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);
287
288//	dprintf("vm_translation_map.lookup_page_table_entry: vsid %ld, va 0x%lx\n", virtualSegmentID, virtualAddress);
289
290	PPCPagingMethodClassic* m = PPCPagingMethodClassic::Method();
291
292	// Search for the page table entry using the primary hash value
293
294	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
295	page_table_entry_group *group = &(m->PageTable())[hash & m->PageTableHashMask()];
296
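	// Each page table entry group holds eight PTEs. A matching entry must
	// carry our VSID, must have been inserted via the primary hash
	// (secondary_hash == false), and must match the abbreviated page index,
	// i.e. the top six bits of the page index: (virtualAddress >> 22) & 0x3f.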
297	for (int i = 0; i < 8; i++) {
298		page_table_entry *entry = &group->entry[i];
299
300		if (entry->virtual_segment_id == virtualSegmentID
301			&& entry->secondary_hash == false
302			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
303			return entry;
304	}
305
306	// didn't find it, try the secondary hash value
307
308	hash = page_table_entry::SecondaryHash(hash);
309	group = &(m->PageTable())[hash & m->PageTableHashMask()];
310
311	for (int i = 0; i < 8; i++) {
312		page_table_entry *entry = &group->entry[i];
313
314		if (entry->virtual_segment_id == virtualSegmentID
315			&& entry->secondary_hash == true
316			&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
317			return entry;
318	}
319
320	return NULL;
321}
322
323
324bool
325PPCVMTranslationMapClassic::RemovePageTableEntry(addr_t virtualAddress)
326{
327	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
328	if (entry == NULL)
329		return false;
330
331	entry->valid = 0;
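	// Flush the now stale translation from the TLBs: ppc_sync() makes the
	// cleared valid bit visible, tlbie() invalidates the entry for this
	// address, and eieio()/tlbsync()/ppc_sync() wait for the invalidation
	// to complete on all processors.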
332	ppc_sync();
333	tlbie(virtualAddress);
334	eieio();
335	tlbsync();
336	ppc_sync();
337
338	return true;
339}
340
341
342size_t
343PPCVMTranslationMapClassic::MaxPagesNeededToMap(addr_t start, addr_t end) const
344{
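	// The classic PPC MMU uses a single, preallocated hash page table, so
	// Map() never needs to allocate additional page table pages.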
345	return 0;
346}
347
348
349status_t
350PPCVMTranslationMapClassic::Map(addr_t virtualAddress,
351	phys_addr_t physicalAddress, uint32 attributes,
352	uint32 memoryType, vm_page_reservation* reservation)
353{
	TRACE("map_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n",
		physicalAddress, virtualAddress);
355
	// look up the VSID based on the virtual address
357	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);
358	uint32 protection = 0;
359
360	// ToDo: check this
361	// all kernel mappings are R/W to supervisor code
362	if (attributes & (B_READ_AREA | B_WRITE_AREA))
363		protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY;
364
365	//dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va);
366
367	PPCPagingMethodClassic* m = PPCPagingMethodClassic::Method();
368
369	// Search for a free page table slot using the primary hash value
370	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
371	page_table_entry_group *group = &(m->PageTable())[hash & m->PageTableHashMask()];
372
373	for (int i = 0; i < 8; i++) {
374		page_table_entry *entry = &group->entry[i];
375
376		if (entry->valid)
377			continue;
378
379		m->FillPageTableEntry(entry, virtualSegmentID, virtualAddress,
380			physicalAddress, protection, memoryType, false);
381		fMapCount++;
382		return B_OK;
383	}
384
	// Didn't find one, try the secondary hash value
386
387	hash = page_table_entry::SecondaryHash(hash);
388	group = &(m->PageTable())[hash & m->PageTableHashMask()];
389
390	for (int i = 0; i < 8; i++) {
391		page_table_entry *entry = &group->entry[i];
392
393		if (entry->valid)
394			continue;
395
396		m->FillPageTableEntry(entry, virtualSegmentID, virtualAddress,
397			physicalAddress, protection, memoryType, false);
398		fMapCount++;
399		return B_OK;
400	}
401
402	panic("vm_translation_map.map_tmap: hash table full\n");
403	return B_ERROR;
404
405#if 0//X86
406/*
407	dprintf("pgdir at 0x%x\n", pgdir);
408	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
409	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
410	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
411	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
412	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
413*/
414	page_directory_entry* pd = fPagingStructures->pgdir_virt;
415
416	// check to see if a page table exists for this range
417	uint32 index = VADDR_TO_PDENT(va);
418	if ((pd[index] & PPC_PDE_PRESENT) == 0) {
419		phys_addr_t pgtable;
420		vm_page *page;
421
422		// we need to allocate a pgtable
423		page = vm_page_allocate_page(reservation,
424			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
425
426		DEBUG_PAGE_ACCESS_END(page);
427
428		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
429
430		TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);
431
432		// put it in the pgdir
433		PPCPagingMethodClassic::PutPageTableInPageDir(&pd[index], pgtable,
434			attributes
435				| ((attributes & B_USER_PROTECTION) != 0
436						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
437
438		// update any other page directories, if it maps kernel space
439		if (index >= FIRST_KERNEL_PGDIR_ENT
440			&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
441			PPCPagingStructuresClassic::UpdateAllPageDirs(index, pd[index]);
442		}
443
444		fMapCount++;
445	}
446
447	// now, fill in the pentry
448	Thread* thread = thread_get_current_thread();
449	ThreadCPUPinner pinner(thread);
450
451	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
452		pd[index] & PPC_PDE_ADDRESS_MASK);
453	index = VADDR_TO_PTENT(va);
454
455	ASSERT_PRINT((pt[index] & PPC_PTE_PRESENT) == 0,
456		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
457		pt[index]);
458
459	PPCPagingMethodClassic::PutPageTableEntryInTable(&pt[index], pa, attributes,
460		memoryType, fIsKernelMap);
461
462	pinner.Unlock();
463
464	// Note: We don't need to invalidate the TLB for this address, as previously
465	// the entry was not present and the TLB doesn't cache those entries.
466
467	fMapCount++;
468
469	return 0;
470#endif
471}
472
473
474status_t
475PPCVMTranslationMapClassic::Unmap(addr_t start, addr_t end)
476{
477	page_table_entry *entry;
478
479	start = ROUNDDOWN(start, B_PAGE_SIZE);
480	end = ROUNDUP(end, B_PAGE_SIZE);
481
482	if (start >= end)
483		return B_OK;
484
485	TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);
486
487//	dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end);
488
489	while (start < end) {
490		if (RemovePageTableEntry(start))
491			fMapCount--;
492
493		start += B_PAGE_SIZE;
494	}
495
496	return B_OK;
497
498#if 0//X86
499
500	start = ROUNDDOWN(start, B_PAGE_SIZE);
501	if (start >= end)
502		return B_OK;
503
504	TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);
505
506	page_directory_entry *pd = fPagingStructures->pgdir_virt;
507
508	do {
509		int index = VADDR_TO_PDENT(start);
510		if ((pd[index] & PPC_PDE_PRESENT) == 0) {
511			// no page table here, move the start up to access the next page
512			// table
513			start = ROUNDUP(start + 1, kPageTableAlignment);
514			continue;
515		}
516
517		Thread* thread = thread_get_current_thread();
518		ThreadCPUPinner pinner(thread);
519
520		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
521			pd[index] & PPC_PDE_ADDRESS_MASK);
522
523		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
524				index++, start += B_PAGE_SIZE) {
525			if ((pt[index] & PPC_PTE_PRESENT) == 0) {
526				// page mapping not valid
527				continue;
528			}
529
530			TRACE("unmap_tmap: removing page 0x%lx\n", start);
531
532			page_table_entry oldEntry
533				= PPCPagingMethodClassic::ClearPageTableEntryFlags(&pt[index],
534					PPC_PTE_PRESENT);
535			fMapCount--;
536
537			if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
538				// Note, that we only need to invalidate the address, if the
				// accessed flag was set, since only then the entry could have
540				// been in any TLB.
541				InvalidatePage(start);
542			}
543		}
544	} while (start != 0 && start < end);
545
546	return B_OK;
547#endif
548}
549
550
551status_t
552PPCVMTranslationMapClassic::RemapAddressRange(addr_t *_virtualAddress,
553	size_t size, bool unmap)
554{
555	addr_t virtualAddress = ROUNDDOWN(*_virtualAddress, B_PAGE_SIZE);
556	size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);
557
558	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();
559
560	// reserve space in the address space
561	void *newAddress = NULL;
562	status_t error = vm_reserve_address_range(addressSpace->ID(), &newAddress,
563		B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
564	if (error != B_OK)
565		return error;
566
567	// get the area's first physical page
568	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
569	if (!entry)
570		return B_ERROR;
571	phys_addr_t physicalBase = (phys_addr_t)entry->physical_page_number << 12;
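	// Note: this assumes the remapped range is physically contiguous -- the
	// new mapping is derived solely from the physical address of the first
	// page.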
572
573	// map the pages
574	error = ppc_map_address_range((addr_t)newAddress, physicalBase, size);
575	if (error != B_OK)
576		return error;
577
578	*_virtualAddress = (addr_t)newAddress;
579
580	// unmap the old pages
581	if (unmap)
582		ppc_unmap_address_range(virtualAddress, size);
583
584	return B_OK;
585}
586
587
588status_t
589PPCVMTranslationMapClassic::DebugMarkRangePresent(addr_t start, addr_t end,
590	bool markPresent)
591{
592	panic("%s: UNIMPLEMENTED", __FUNCTION__);
593	return B_ERROR;
594#if 0//X86
595	start = ROUNDDOWN(start, B_PAGE_SIZE);
596	if (start >= end)
597		return B_OK;
598
599	page_directory_entry *pd = fPagingStructures->pgdir_virt;
600
601	do {
602		int index = VADDR_TO_PDENT(start);
603		if ((pd[index] & PPC_PDE_PRESENT) == 0) {
604			// no page table here, move the start up to access the next page
605			// table
606			start = ROUNDUP(start + 1, kPageTableAlignment);
607			continue;
608		}
609
610		Thread* thread = thread_get_current_thread();
611		ThreadCPUPinner pinner(thread);
612
613		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
614			pd[index] & PPC_PDE_ADDRESS_MASK);
615
616		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
617				index++, start += B_PAGE_SIZE) {
618			if ((pt[index] & PPC_PTE_PRESENT) == 0) {
619				if (!markPresent)
620					continue;
621
622				PPCPagingMethodClassic::SetPageTableEntryFlags(&pt[index],
623					PPC_PTE_PRESENT);
624			} else {
625				if (markPresent)
626					continue;
627
628				page_table_entry oldEntry
629					= PPCPagingMethodClassic::ClearPageTableEntryFlags(&pt[index],
630						PPC_PTE_PRESENT);
631
632				if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
633					// Note, that we only need to invalidate the address, if the
					// accessed flag was set, since only then the entry could
635					// have been in any TLB.
636					InvalidatePage(start);
637				}
638			}
639		}
640	} while (start != 0 && start < end);
641
642	return B_OK;
643#endif
644}
645
646
647/*!	Caller must have locked the cache of the page to be unmapped.
648	This object shouldn't be locked.
649*/
650status_t
651PPCVMTranslationMapClassic::UnmapPage(VMArea* area, addr_t address,
652	bool updatePageQueue)
653{
654	ASSERT(address % B_PAGE_SIZE == 0);
655
656	RecursiveLocker locker(fLock);
657
658	if (area->cache_type == CACHE_TYPE_DEVICE) {
659		if (!RemovePageTableEntry(address))
660			return B_ENTRY_NOT_FOUND;
661
662		fMapCount--;
663		return B_OK;
664	}
665
666	page_table_entry* entry = LookupPageTableEntry(address);
667	if (entry == NULL)
668		return B_ENTRY_NOT_FOUND;
669
670	page_num_t pageNumber = entry->physical_page_number;
671	bool accessed = entry->referenced;
672	bool modified = entry->changed;
673
674	RemovePageTableEntry(address);
675
676	fMapCount--;
677
678	locker.Detach();
679		// PageUnmapped() will unlock for us
680
681	PageUnmapped(area, pageNumber, accessed, modified, updatePageQueue);
682
683	return B_OK;
684
685#if 0//X86
686
687	ASSERT(address % B_PAGE_SIZE == 0);
688
689	page_directory_entry* pd = fPagingStructures->pgdir_virt;
690
691	TRACE("PPCVMTranslationMapClassic::UnmapPage(%#" B_PRIxADDR ")\n", address);
692
693	RecursiveLocker locker(fLock);
694
695	int index = VADDR_TO_PDENT(address);
696	if ((pd[index] & PPC_PDE_PRESENT) == 0)
697		return B_ENTRY_NOT_FOUND;
698
699	ThreadCPUPinner pinner(thread_get_current_thread());
700
701	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
702		pd[index] & PPC_PDE_ADDRESS_MASK);
703
704	index = VADDR_TO_PTENT(address);
705	page_table_entry oldEntry = PPCPagingMethodClassic::ClearPageTableEntry(
706		&pt[index]);
707
708	pinner.Unlock();
709
710	if ((oldEntry & PPC_PTE_PRESENT) == 0) {
711		// page mapping not valid
712		return B_ENTRY_NOT_FOUND;
713	}
714
715	fMapCount--;
716
717	if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
718		// Note, that we only need to invalidate the address, if the
		// accessed flag was set, since only then the entry could have been
720		// in any TLB.
721		InvalidatePage(address);
722		Flush();
723
724		// NOTE: Between clearing the page table entry and Flush() other
725		// processors (actually even this processor with another thread of the
726		// same team) could still access the page in question via their cached
727		// entry. We can obviously lose a modified flag in this case, with the
728		// effect that the page looks unmodified (and might thus be recycled),
729		// but is actually modified.
730		// In most cases this is harmless, but for vm_remove_all_page_mappings()
731		// this is actually a problem.
732		// Interestingly FreeBSD seems to ignore this problem as well
733		// (cf. pmap_remove_all()), unless I've missed something.
734	}
735
736	locker.Detach();
737		// PageUnmapped() will unlock for us
738
739	PageUnmapped(area, (oldEntry & PPC_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
740		(oldEntry & PPC_PTE_ACCESSED) != 0, (oldEntry & PPC_PTE_DIRTY) != 0,
741		updatePageQueue);
742
743	return B_OK;
744#endif
745}
746
747
748void
749PPCVMTranslationMapClassic::UnmapPages(VMArea* area, addr_t base, size_t size,
750	bool updatePageQueue)
751{
752	panic("%s: UNIMPLEMENTED", __FUNCTION__);
753#if 0//X86
754	if (size == 0)
755		return;
756
757	addr_t start = base;
758	addr_t end = base + size - 1;
759
760	TRACE("PPCVMTranslationMapClassic::UnmapPages(%p, %#" B_PRIxADDR ", %#"
761		B_PRIxADDR ")\n", area, start, end);
762
763	page_directory_entry* pd = fPagingStructures->pgdir_virt;
764
765	VMAreaMappings queue;
766
767	RecursiveLocker locker(fLock);
768
769	do {
770		int index = VADDR_TO_PDENT(start);
771		if ((pd[index] & PPC_PDE_PRESENT) == 0) {
772			// no page table here, move the start up to access the next page
773			// table
774			start = ROUNDUP(start + 1, kPageTableAlignment);
775			continue;
776		}
777
778		Thread* thread = thread_get_current_thread();
779		ThreadCPUPinner pinner(thread);
780
781		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
782			pd[index] & PPC_PDE_ADDRESS_MASK);
783
784		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
785				index++, start += B_PAGE_SIZE) {
786			page_table_entry oldEntry
787				= PPCPagingMethodClassic::ClearPageTableEntry(&pt[index]);
788			if ((oldEntry & PPC_PTE_PRESENT) == 0)
789				continue;
790
791			fMapCount--;
792
793			if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
794				// Note, that we only need to invalidate the address, if the
				// accessed flag was set, since only then the entry could have
796				// been in any TLB.
797				InvalidatePage(start);
798			}
799
800			if (area->cache_type != CACHE_TYPE_DEVICE) {
801				// get the page
802				vm_page* page = vm_lookup_page(
803					(oldEntry & PPC_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
804				ASSERT(page != NULL);
805
806				DEBUG_PAGE_ACCESS_START(page);
807
808				// transfer the accessed/dirty flags to the page
809				if ((oldEntry & PPC_PTE_ACCESSED) != 0)
810					page->accessed = true;
811				if ((oldEntry & PPC_PTE_DIRTY) != 0)
812					page->modified = true;
813
814				// remove the mapping object/decrement the wired_count of the
815				// page
816				if (area->wiring == B_NO_LOCK) {
817					vm_page_mapping* mapping = NULL;
818					vm_page_mappings::Iterator iterator
819						= page->mappings.GetIterator();
820					while ((mapping = iterator.Next()) != NULL) {
821						if (mapping->area == area)
822							break;
823					}
824
825					ASSERT(mapping != NULL);
826
827					area->mappings.Remove(mapping);
828					page->mappings.Remove(mapping);
829					queue.Add(mapping);
830				} else
831					page->DecrementWiredCount();
832
833				if (!page->IsMapped()) {
834					atomic_add(&gMappedPagesCount, -1);
835
836					if (updatePageQueue) {
837						if (page->Cache()->temporary)
838							vm_page_set_state(page, PAGE_STATE_INACTIVE);
839						else if (page->modified)
840							vm_page_set_state(page, PAGE_STATE_MODIFIED);
841						else
842							vm_page_set_state(page, PAGE_STATE_CACHED);
843					}
844				}
845
846				DEBUG_PAGE_ACCESS_END(page);
847			}
848		}
849
850		Flush();
851			// flush explicitly, since we directly use the lock
852	} while (start != 0 && start < end);
853
854	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
855	// really critical here, as in all cases this method is used, the unmapped
856	// area range is unmapped for good (resized/cut) and the pages will likely
857	// be freed.
858
859	locker.Unlock();
860
861	// free removed mappings
862	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
863	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
864		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
865	while (vm_page_mapping* mapping = queue.RemoveHead())
866		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
867#endif
868}
869
870
871void
872PPCVMTranslationMapClassic::UnmapArea(VMArea* area, bool deletingAddressSpace,
873	bool ignoreTopCachePageFlags)
874{
875	panic("%s: UNIMPLEMENTED", __FUNCTION__);
876#if 0//X86
877	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
878		PPCVMTranslationMapClassic::UnmapPages(area, area->Base(), area->Size(),
879			true);
880		return;
881	}
882
883	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
884
885	page_directory_entry* pd = fPagingStructures->pgdir_virt;
886
887	RecursiveLocker locker(fLock);
888
889	VMAreaMappings mappings;
890	mappings.MoveFrom(&area->mappings);
891
892	for (VMAreaMappings::Iterator it = mappings.GetIterator();
893			vm_page_mapping* mapping = it.Next();) {
894		vm_page* page = mapping->page;
895		page->mappings.Remove(mapping);
896
897		VMCache* cache = page->Cache();
898
899		bool pageFullyUnmapped = false;
900		if (!page->IsMapped()) {
901			atomic_add(&gMappedPagesCount, -1);
902			pageFullyUnmapped = true;
903		}
904
905		if (unmapPages || cache != area->cache) {
906			addr_t address = area->Base()
907				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);
908
909			int index = VADDR_TO_PDENT(address);
910			if ((pd[index] & PPC_PDE_PRESENT) == 0) {
911				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
912					"has no page dir entry", page, area, address);
913				continue;
914			}
915
916			ThreadCPUPinner pinner(thread_get_current_thread());
917
918			page_table_entry* pt
919				= (page_table_entry*)fPageMapper->GetPageTableAt(
920					pd[index] & PPC_PDE_ADDRESS_MASK);
921			page_table_entry oldEntry
922				= PPCPagingMethodClassic::ClearPageTableEntry(
923					&pt[VADDR_TO_PTENT(address)]);
924
925			pinner.Unlock();
926
927			if ((oldEntry & PPC_PTE_PRESENT) == 0) {
928				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
929					"has no page table entry", page, area, address);
930				continue;
931			}
932
933			// transfer the accessed/dirty flags to the page and invalidate
934			// the mapping, if necessary
935			if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
936				page->accessed = true;
937
938				if (!deletingAddressSpace)
939					InvalidatePage(address);
940			}
941
942			if ((oldEntry & PPC_PTE_DIRTY) != 0)
943				page->modified = true;
944
945			if (pageFullyUnmapped) {
946				DEBUG_PAGE_ACCESS_START(page);
947
948				if (cache->temporary)
949					vm_page_set_state(page, PAGE_STATE_INACTIVE);
950				else if (page->modified)
951					vm_page_set_state(page, PAGE_STATE_MODIFIED);
952				else
953					vm_page_set_state(page, PAGE_STATE_CACHED);
954
955				DEBUG_PAGE_ACCESS_END(page);
956			}
957		}
958
959		fMapCount--;
960	}
961
962	Flush();
		// flush explicitly, since we directly use the lock
964
965	locker.Unlock();
966
967	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
968	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
969		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
970	while (vm_page_mapping* mapping = mappings.RemoveHead())
971		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
972#endif
973}
974
975
976status_t
977PPCVMTranslationMapClassic::Query(addr_t va, phys_addr_t *_outPhysical,
978	uint32 *_outFlags)
979{
980	page_table_entry *entry;
981
982	// default the flags to not present
983	*_outFlags = 0;
984	*_outPhysical = 0;
985
986	entry = LookupPageTableEntry(va);
987	if (entry == NULL)
988		return B_NO_ERROR;
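			// not mapped -- the cleared flags already say "not present"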
989
	// ToDo: check this!
	if (IS_KERNEL_ADDRESS(va)) {
		*_outFlags |= B_KERNEL_READ_AREA
			| (entry->page_protection == PTE_READ_ONLY
				? 0 : B_KERNEL_WRITE_AREA);
	} else {
		*_outFlags |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA
			| (entry->page_protection == PTE_READ_ONLY ? 0 : B_WRITE_AREA);
	}
995
996	*_outFlags |= entry->changed ? PAGE_MODIFIED : 0;
997	*_outFlags |= entry->referenced ? PAGE_ACCESSED : 0;
998	*_outFlags |= entry->valid ? PAGE_PRESENT : 0;
999
1000	*_outPhysical = entry->physical_page_number * B_PAGE_SIZE;
1001
1002	return B_OK;
1003
1004#if 0//X86
1005	// default the flags to not present
1006	*_flags = 0;
1007	*_physical = 0;
1008
1009	int index = VADDR_TO_PDENT(va);
1010	page_directory_entry *pd = fPagingStructures->pgdir_virt;
1011	if ((pd[index] & PPC_PDE_PRESENT) == 0) {
1012		// no pagetable here
1013		return B_OK;
1014	}
1015
1016	Thread* thread = thread_get_current_thread();
1017	ThreadCPUPinner pinner(thread);
1018
1019	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
1020		pd[index] & PPC_PDE_ADDRESS_MASK);
1021	page_table_entry entry = pt[VADDR_TO_PTENT(va)];
1022
1023	*_physical = entry & PPC_PDE_ADDRESS_MASK;
1024
1025	// read in the page state flags
1026	if ((entry & PPC_PTE_USER) != 0) {
1027		*_flags |= ((entry & PPC_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
1028			| B_READ_AREA;
1029	}
1030
1031	*_flags |= ((entry & PPC_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
1032		| B_KERNEL_READ_AREA
1033		| ((entry & PPC_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
1034		| ((entry & PPC_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
1035		| ((entry & PPC_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
1036
1037	pinner.Unlock();
1038
1039	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);
1040
1041	return B_OK;
1042#endif
1043}
1044
1045
1046status_t
1047PPCVMTranslationMapClassic::QueryInterrupt(addr_t virtualAddress,
1048	phys_addr_t *_physicalAddress, uint32 *_flags)
1049{
	return PPCVMTranslationMapClassic::Query(virtualAddress, _physicalAddress,
		_flags);
1051
1052#if 0//X86
1053	*_flags = 0;
1054	*_physical = 0;
1055
1056	int index = VADDR_TO_PDENT(va);
1057	page_directory_entry* pd = fPagingStructures->pgdir_virt;
1058	if ((pd[index] & PPC_PDE_PRESENT) == 0) {
1059		// no pagetable here
1060		return B_OK;
1061	}
1062
1063	// map page table entry
1064	page_table_entry* pt = (page_table_entry*)PPCPagingMethodClassic::Method()
1065		->PhysicalPageMapper()->InterruptGetPageTableAt(
1066			pd[index] & PPC_PDE_ADDRESS_MASK);
1067	page_table_entry entry = pt[VADDR_TO_PTENT(va)];
1068
1069	*_physical = entry & PPC_PDE_ADDRESS_MASK;
1070
1071	// read in the page state flags
1072	if ((entry & PPC_PTE_USER) != 0) {
1073		*_flags |= ((entry & PPC_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
1074			| B_READ_AREA;
1075	}
1076
1077	*_flags |= ((entry & PPC_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
1078		| B_KERNEL_READ_AREA
1079		| ((entry & PPC_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
1080		| ((entry & PPC_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
1081		| ((entry & PPC_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
1082
1083	return B_OK;
1084#endif
1085}
1086
1087
1088status_t
1089PPCVMTranslationMapClassic::Protect(addr_t start, addr_t end, uint32 attributes,
1090	uint32 memoryType)
1091{
1092	// XXX finish
1093	return B_ERROR;
1094#if 0//X86
1095	start = ROUNDDOWN(start, B_PAGE_SIZE);
1096	if (start >= end)
1097		return B_OK;
1098
1099	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
1100		attributes);
1101
1102	// compute protection flags
1103	uint32 newProtectionFlags = 0;
1104	if ((attributes & B_USER_PROTECTION) != 0) {
1105		newProtectionFlags = PPC_PTE_USER;
1106		if ((attributes & B_WRITE_AREA) != 0)
1107			newProtectionFlags |= PPC_PTE_WRITABLE;
1108	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
1109		newProtectionFlags = PPC_PTE_WRITABLE;
1110
1111	page_directory_entry *pd = fPagingStructures->pgdir_virt;
1112
1113	do {
1114		int index = VADDR_TO_PDENT(start);
1115		if ((pd[index] & PPC_PDE_PRESENT) == 0) {
1116			// no page table here, move the start up to access the next page
1117			// table
1118			start = ROUNDUP(start + 1, kPageTableAlignment);
1119			continue;
1120		}
1121
1122		Thread* thread = thread_get_current_thread();
1123		ThreadCPUPinner pinner(thread);
1124
1125		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
1126			pd[index] & PPC_PDE_ADDRESS_MASK);
1127
1128		for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
1129				index++, start += B_PAGE_SIZE) {
1130			page_table_entry entry = pt[index];
1131			if ((entry & PPC_PTE_PRESENT) == 0) {
1132				// page mapping not valid
1133				continue;
1134			}
1135
1136			TRACE("protect_tmap: protect page 0x%lx\n", start);
1137
1138			// set the new protection flags -- we want to do that atomically,
1139			// without changing the accessed or dirty flag
1140			page_table_entry oldEntry;
1141			while (true) {
1142				oldEntry = PPCPagingMethodClassic::TestAndSetPageTableEntry(
1143					&pt[index],
1144					(entry & ~(PPC_PTE_PROTECTION_MASK
1145							| PPC_PTE_MEMORY_TYPE_MASK))
1146						| newProtectionFlags
1147						| PPCPagingMethodClassic::MemoryTypeToPageTableEntryFlags(
1148							memoryType),
1149					entry);
1150				if (oldEntry == entry)
1151					break;
1152				entry = oldEntry;
1153			}
1154
1155			if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
1156				// Note, that we only need to invalidate the address, if the
1157				// accessed flag was set, since only then the entry could have
1158				// been in any TLB.
1159				InvalidatePage(start);
1160			}
1161		}
1162	} while (start != 0 && start < end);
1163
1164	return B_OK;
1165#endif
1166}
1167
1168
1169status_t
1170PPCVMTranslationMapClassic::ClearFlags(addr_t virtualAddress, uint32 flags)
1171{
1172	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
1173	if (entry == NULL)
1174		return B_NO_ERROR;
1175
1176	bool modified = false;
1177
1178	// clear the bits
1179	if (flags & PAGE_MODIFIED && entry->changed) {
1180		entry->changed = false;
1181		modified = true;
1182	}
1183	if (flags & PAGE_ACCESSED && entry->referenced) {
1184		entry->referenced = false;
1185		modified = true;
1186	}
1187
1188	// synchronize
1189	if (modified) {
1190		tlbie(virtualAddress);
1191		eieio();
1192		tlbsync();
1193		ppc_sync();
1194	}
1195
1196	return B_OK;
1197
1198#if 0//X86
1199	int index = VADDR_TO_PDENT(va);
1200	page_directory_entry* pd = fPagingStructures->pgdir_virt;
1201	if ((pd[index] & PPC_PDE_PRESENT) == 0) {
1202		// no pagetable here
1203		return B_OK;
1204	}
1205
1206	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? PPC_PTE_DIRTY : 0)
1207		| ((flags & PAGE_ACCESSED) ? PPC_PTE_ACCESSED : 0);
1208
1209	Thread* thread = thread_get_current_thread();
1210	ThreadCPUPinner pinner(thread);
1211
1212	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
1213		pd[index] & PPC_PDE_ADDRESS_MASK);
1214	index = VADDR_TO_PTENT(va);
1215
1216	// clear out the flags we've been requested to clear
1217	page_table_entry oldEntry
1218		= PPCPagingMethodClassic::ClearPageTableEntryFlags(&pt[index],
1219			flagsToClear);
1220
1221	pinner.Unlock();
1222
1223	if ((oldEntry & flagsToClear) != 0)
1224		InvalidatePage(va);
1225
1226	return B_OK;
1227#endif
1228}
1229
1230
1231bool
1232PPCVMTranslationMapClassic::ClearAccessedAndModified(VMArea* area,
1233	addr_t address, bool unmapIfUnaccessed, bool& _modified)
1234{
1235	ASSERT(address % B_PAGE_SIZE == 0);
1236
1237	// TODO: Implement for real! ATM this is just an approximation using
1238	// Query(), ClearFlags(), and UnmapPage(). See below!
1239
1240	RecursiveLocker locker(fLock);
1241
1242	uint32 flags;
1243	phys_addr_t physicalAddress;
1244	if (Query(address, &physicalAddress, &flags) != B_OK
1245		|| (flags & PAGE_PRESENT) == 0) {
1246		return false;
1247	}
1248
1249	_modified = (flags & PAGE_MODIFIED) != 0;
1250
1251	if ((flags & (PAGE_ACCESSED | PAGE_MODIFIED)) != 0)
1252		ClearFlags(address, flags & (PAGE_ACCESSED | PAGE_MODIFIED));
1253
1254	if ((flags & PAGE_ACCESSED) != 0)
1255		return true;
1256
1257	if (!unmapIfUnaccessed)
1258		return false;
1259
1260	locker.Unlock();
1261
1262	UnmapPage(area, address, false);
1263		// TODO: Obvious race condition: Between querying and unmapping the
1264		// page could have been accessed. We try to compensate by considering
1265		// vm_page::{accessed,modified} (which would have been updated by
1266		// UnmapPage()) below, but that doesn't quite match the required
1267		// semantics of the method.
1268
1269	vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
1270	if (page == NULL)
1271		return false;
1272
1273	_modified |= page->modified;
1274
1275	return page->accessed;
1276
1277#if 0//X86
1278	page_directory_entry* pd = fPagingStructures->pgdir_virt;
1279
1280	TRACE("PPCVMTranslationMapClassic::ClearAccessedAndModified(%#" B_PRIxADDR
1281		")\n", address);
1282
1283	RecursiveLocker locker(fLock);
1284
1285	int index = VADDR_TO_PDENT(address);
1286	if ((pd[index] & PPC_PDE_PRESENT) == 0)
1287		return false;
1288
1289	ThreadCPUPinner pinner(thread_get_current_thread());
1290
1291	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
1292		pd[index] & PPC_PDE_ADDRESS_MASK);
1293
1294	index = VADDR_TO_PTENT(address);
1295
1296	// perform the deed
1297	page_table_entry oldEntry;
1298
1299	if (unmapIfUnaccessed) {
1300		while (true) {
1301			oldEntry = pt[index];
1302			if ((oldEntry & PPC_PTE_PRESENT) == 0) {
1303				// page mapping not valid
1304				return false;
1305			}
1306
1307			if (oldEntry & PPC_PTE_ACCESSED) {
1308				// page was accessed -- just clear the flags
1309				oldEntry = PPCPagingMethodClassic::ClearPageTableEntryFlags(
1310					&pt[index], PPC_PTE_ACCESSED | PPC_PTE_DIRTY);
1311				break;
1312			}
1313
1314			// page hasn't been accessed -- unmap it
1315			if (PPCPagingMethodClassic::TestAndSetPageTableEntry(&pt[index], 0,
1316					oldEntry) == oldEntry) {
1317				break;
1318			}
1319
1320			// something changed -- check again
1321		}
1322	} else {
1323		oldEntry = PPCPagingMethodClassic::ClearPageTableEntryFlags(&pt[index],
1324			PPC_PTE_ACCESSED | PPC_PTE_DIRTY);
1325	}
1326
1327	pinner.Unlock();
1328
1329	_modified = (oldEntry & PPC_PTE_DIRTY) != 0;
1330
1331	if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
1332		// Note, that we only need to invalidate the address, if the
		// accessed flag was set, since only then the entry could have been
1334		// in any TLB.
1335		InvalidatePage(address);
1336
1337		Flush();
1338
1339		return true;
1340	}
1341
1342	if (!unmapIfUnaccessed)
1343		return false;
1344
1345	// We have unmapped the address. Do the "high level" stuff.
1346
1347	fMapCount--;
1348
1349	locker.Detach();
1350		// UnaccessedPageUnmapped() will unlock for us
1351
1352	UnaccessedPageUnmapped(area,
1353		(oldEntry & PPC_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
1354
1355	return false;
1356#endif
1357}
1358
1359
1360PPCPagingStructures*
1361PPCVMTranslationMapClassic::PagingStructures() const
1362{
1363	return fPagingStructures;
1364}
1365