/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/pae/X86VMTranslationMapPAE.h"

#include <int.h>
#include <slab/Slab.h>
#include <thread.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/pae/X86PagingMethodPAE.h"
#include "paging/pae/X86PagingStructuresPAE.h"
#include "paging/x86_physical_page_mapper.h"


//#define TRACE_X86_VM_TRANSLATION_MAP_PAE
#ifdef TRACE_X86_VM_TRANSLATION_MAP_PAE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


#if B_HAIKU_PHYSICAL_BITS == 64


#if TRANSLATION_MAP_TRACING


namespace TranslationMapTracing {


class TranslationMapTraceEntryBase
	: public TRACE_ENTRY_SELECTOR(TRANSLATION_MAP_TRACING_STACK_TRACE) {
public:
	TranslationMapTraceEntryBase()
		:
		TraceEntryBase(TRANSLATION_MAP_TRACING_STACK_TRACE, 0, true)
	{
	}

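	// Formats a page table entry as its physical address followed by flag
	// letters: P(resent), W(ritable), U(ser), X (executable), G(lobal),
	// WT (write-through), UC (caching disabled), A(ccessed) and D(irty).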
	void PrintPageTableEntry(TraceOutput& out, pae_page_table_entry entry)
	{
		out.Print("%#" B_PRIx64  " %c%c%c%c%c %s %s %c%c",
			entry & X86_PAE_PTE_ADDRESS_MASK,
			(entry & X86_PAE_PTE_PRESENT) != 0 ? 'P' : '-',
			(entry & X86_PAE_PTE_WRITABLE) != 0 ? 'W' : '-',
			(entry & X86_PAE_PTE_USER) != 0 ? 'U' : '-',
			(entry & X86_PAE_PTE_NOT_EXECUTABLE) != 0 ? '-' : 'X',
			(entry & X86_PAE_PTE_GLOBAL) != 0 ? 'G' : '-',
			(entry & X86_PAE_PTE_WRITE_THROUGH) != 0 ? "WT" : "--",
			(entry & X86_PAE_PTE_CACHING_DISABLED) != 0 ? "UC" : "--",
			(entry & X86_PAE_PTE_ACCESSED) != 0 ? 'A' : '-',
			(entry & X86_PAE_PTE_DIRTY) != 0 ? 'D' : '-');
	}
};


class Map : public TranslationMapTraceEntryBase {
public:
	Map(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry entry)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fEntry(entry)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map map: %p: %#" B_PRIxADDR " -> ", fMap,
			fVirtualAddress);
		PrintPageTableEntry(out, fEntry);
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fEntry;
};


class Unmap : public TranslationMapTraceEntryBase {
public:
	Unmap(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry entry)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fEntry(entry)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map unmap: %p: %#" B_PRIxADDR
			" -> ", fMap, fVirtualAddress);
		PrintPageTableEntry(out, fEntry);
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fEntry;
};


class Protect : public TranslationMapTraceEntryBase {
public:
	Protect(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry oldEntry, pae_page_table_entry newEntry)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fOldEntry(oldEntry),
		fNewEntry(newEntry)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map protect: %p: %#" B_PRIxADDR
			" -> ", fMap, fVirtualAddress);
		PrintPageTableEntry(out, fNewEntry);
		out.Print(" (%c%c%c)",
			(fOldEntry & X86_PAE_PTE_WRITABLE) != 0 ? 'W' : '-',
			(fOldEntry & X86_PAE_PTE_USER) != 0 ? 'U' : '-',
			(fOldEntry & X86_PAE_PTE_NOT_EXECUTABLE) != 0 ? '-' : 'X');
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fOldEntry;
	pae_page_table_entry	fNewEntry;
};


class ClearFlags : public TranslationMapTraceEntryBase {
public:
	ClearFlags(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry oldEntry, pae_page_table_entry flagsCleared)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fOldEntry(oldEntry),
		fFlagsCleared(flagsCleared)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map clear flags: %p: %#" B_PRIxADDR
			" -> ", fMap, fVirtualAddress);
		PrintPageTableEntry(out, fOldEntry & ~fFlagsCleared);
		out.Print(", cleared %c%c (%c%c)",
			(fOldEntry & fFlagsCleared & X86_PAE_PTE_ACCESSED) != 0 ? 'A' : '-',
			(fOldEntry & fFlagsCleared & X86_PAE_PTE_DIRTY) != 0 ? 'D' : '-',
			(fFlagsCleared & X86_PAE_PTE_ACCESSED) != 0 ? 'A' : '-',
			(fFlagsCleared & X86_PAE_PTE_DIRTY) != 0 ? 'D' : '-');
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fOldEntry;
	pae_page_table_entry	fFlagsCleared;
};


class ClearFlagsUnmap : public TranslationMapTraceEntryBase {
public:
	ClearFlagsUnmap(X86VMTranslationMapPAE* map, addr_t virtualAddress,
		pae_page_table_entry entry)
		:
		TranslationMapTraceEntryBase(),
		fMap(map),
		fVirtualAddress(virtualAddress),
		fEntry(entry)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("translation map clear flags unmap: %p: %#" B_PRIxADDR
			" -> ", fMap, fVirtualAddress);
		PrintPageTableEntry(out, fEntry);
	}

private:
	X86VMTranslationMapPAE*	fMap;
	addr_t					fVirtualAddress;
	pae_page_table_entry	fEntry;
};


}	// namespace TranslationMapTracing

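// T(x) allocates a tracing entry of the given type; when translation map
// tracing is disabled it expands to nothing.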
#	define T(x)	new(std::nothrow) TranslationMapTracing::x

#else
#	define T(x)
#endif	// TRANSLATION_MAP_TRACING



X86VMTranslationMapPAE::X86VMTranslationMapPAE()
	:
	fPagingStructures(NULL)
{
}


X86VMTranslationMapPAE::~X86VMTranslationMapPAE()
{
	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL)
		fPageMapper->Delete();

	// cycle through and free all of the user space page tables

	STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);
		// assuming 1-1 split of the address space

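	// Only the two lower (userland) page directories belong to this map; the
	// upper two are the shared kernel page directories and are not freed here.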
	for (uint32 k = 0; k < 2; k++) {
		pae_page_directory_entry* pageDir
			= fPagingStructures->VirtualPageDirs()[k];
		if (pageDir == NULL)
			continue;

		for (uint32 i = 0; i < kPAEPageDirEntryCount; i++) {
			if ((pageDir[i] & X86_PAE_PDE_PRESENT) != 0) {
				phys_addr_t address = pageDir[i] & X86_PAE_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (page == NULL)
					panic("X86VMTranslationMapPAE::~X86VMTranslationMapPAE: "
						"didn't find page table page: page address: %#"
						B_PRIxPHYSADDR ", virtual base: %#" B_PRIxADDR "\n",
						address,
						(k * kPAEPageDirEntryCount + i) * kPAEPageTableRange);
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
	}

	fPagingStructures->RemoveReference();
}


status_t
X86VMTranslationMapPAE::Init(bool kernel)
{
	TRACE("X86VMTranslationMapPAE::Init()\n");

	X86VMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) X86PagingStructuresPAE;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	X86PagingMethodPAE* method = X86PagingMethodPAE::Method();

	if (kernel) {
		// kernel
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();

		// we already know the kernel pgdir mapping
		fPagingStructures->Init(method->KernelVirtualPageDirPointerTable(),
			method->KernelPhysicalPageDirPointerTable(), NULL,
			method->KernelVirtualPageDirs(), method->KernelPhysicalPageDirs());
	} else {
		// user
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// The following code assumes that the kernel address space occupies the
		// upper half of the virtual address space. This simplifies things a
		// lot, since it allows us to just use the upper two page directories
		// of the kernel and create two new lower page directories for the
		// userland.
		STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);

		// allocate the page directories (both at once)
		pae_page_directory_entry* virtualPageDirs[4];
		phys_addr_t physicalPageDirs[4];
		virtualPageDirs[0] = (pae_page_directory_entry*)memalign(B_PAGE_SIZE,
			2 * B_PAGE_SIZE);
		if (virtualPageDirs[0] == NULL)
			return B_NO_MEMORY;
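		// kPAEPageTableEntryCount (512) eight-byte entries fill exactly one
		// page, so the second directory starts one page after the first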
		virtualPageDirs[1] = virtualPageDirs[0] + kPAEPageTableEntryCount;

		// clear the userland page directories
		memset(virtualPageDirs[0], 0, 2 * B_PAGE_SIZE);

		// use the upper two kernel page directories
		for (int32 i = 2; i < 4; i++) {
			virtualPageDirs[i] = method->KernelVirtualPageDirs()[i];
			physicalPageDirs[i] = method->KernelPhysicalPageDirs()[i];
		}

		// look up the page directories' physical addresses
		for (int32 i = 0; i < 2; i++) {
			vm_get_page_mapping(VMAddressSpace::KernelID(),
				(addr_t)virtualPageDirs[i], &physicalPageDirs[i]);
		}

		// allocate the PDPT -- it needs a 32 bit physical address, since in
		// PAE mode CR3 can only hold a 32 bit pointer to it
		phys_addr_t physicalPDPT;
		void* pdptHandle;
		pae_page_directory_pointer_table_entry* pdpt
			= (pae_page_directory_pointer_table_entry*)
				method->Allocate32BitPage(physicalPDPT, pdptHandle);
		if (pdpt == NULL) {
			free(virtualPageDirs[0]);
			return B_NO_MEMORY;
		}

		// init the PDPT entries
		for (int32 i = 0; i < 4; i++) {
			pdpt[i] = (physicalPageDirs[i] & X86_PAE_PDPTE_ADDRESS_MASK)
				| X86_PAE_PDPTE_PRESENT;
		}

		// init the paging structures
		fPagingStructures->Init(pdpt, physicalPDPT, pdptHandle, virtualPageDirs,
			physicalPageDirs);
	}

	return B_OK;
}


size_t
X86VMTranslationMapPAE::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// If start == 0, the actual base address is not yet known to the caller and
	// we shall assume the worst case.
	if (start == 0) {
		// offset the range so it has the worst possible alignment
		start = kPAEPageTableRange - B_PAGE_SIZE;
		end += kPAEPageTableRange - B_PAGE_SIZE;
	}

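	// one page is needed for each page table the range touches, i.e. for each
	// kPAEPageTableRange-sized window that [start, end] intersects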
	return end / kPAEPageTableRange + 1 - start / kPAEPageTableRange;
}


status_t
X86VMTranslationMapPAE::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("X86VMTranslationMapPAE::Map(): %#" B_PRIxADDR " -> %#" B_PRIxPHYSADDR
		"\n", virtualAddress, physicalAddress);

	// check to see if a page table exists for this range
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// we need to allocate a page table
		vm_page *page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		phys_addr_t physicalPageTable
			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("X86VMTranslationMapPAE::Map(): asked for free page for "
			"page table: %#" B_PRIxPHYSADDR "\n", physicalPageTable);

		// put it in the page dir
		X86PagingMethodPAE::PutPageTableInPageDir(pageDirEntry,
			physicalPageTable,
			attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

		fMapCount++;
	}

	// now, fill in the page table entry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

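	// A PAE virtual address decomposes into 2 bits of PDPT index, 9 bits of
	// page directory index, 9 bits of page table index, and 12 bits of page
	// offset; the division/modulo below extracts the page table index.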
	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	pae_page_table_entry* entry = pageTable
		+ virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount;

	ASSERT_PRINT((*entry & X86_PAE_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64 " @ %p",
		virtualAddress, *entry, entry);

	X86PagingMethodPAE::PutPageTableEntryInTable(entry, physicalAddress,
		attributes, memoryType, fIsKernelMap);

	T(Map(this, virtualAddress, *entry));

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return 0;
}


status_t
X86VMTranslationMapPAE::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMapPAE::Unmap(): %#" B_PRIxADDR " - %#" B_PRIxADDR
		"\n", start, end);

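	// Iterate over the range one page table at a time. The "start != 0" part
	// of the loop condition catches the wrap-around that occurs when a range
	// extends up to the very top of the address space.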
	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("X86VMTranslationMapPAE::Unmap(): removing page %#"
				B_PRIxADDR "\n", start);

			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearTableEntryFlags(
					&pageTable[index], X86_PAE_PTE_PRESENT);

			T(Unmap(this, start, oldEntry));

			fMapCount--;

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


status_t
X86VMTranslationMapPAE::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {

			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
				if (!markPresent)
					continue;

				X86PagingMethodPAE::SetTableEntryFlags(
					&pageTable[index], X86_PAE_PTE_PRESENT);
			} else {
				if (markPresent)
					continue;

				pae_page_table_entry oldEntry
					= X86PagingMethodPAE::ClearTableEntryFlags(
						&pageTable[index], X86_PAE_PTE_PRESENT);

				if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
					// Note that we only need to invalidate the address if the
					// accessed flag was set, since only then the entry could
					// have been in any TLB.
					InvalidatePage(start);
				}
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
X86VMTranslationMapPAE::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);

	TRACE("X86VMTranslationMapPAE::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

	pae_page_table_entry oldEntry = X86PagingMethodPAE::ClearTableEntry(
		&pageTable[address / B_PAGE_SIZE % kPAEPageTableEntryCount]);

	T(Unmap(this, address, oldEntry));

	pinner.Unlock();

	if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
		// page mapping not valid
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;

	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any TLB.
		InvalidatePage(address);

		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		(oldEntry & X86_PAE_PTE_ACCESSED) != 0,
		(oldEntry & X86_PAE_PTE_DIRTY) != 0, updatePageQueue);

	return B_OK;
}


void
X86VMTranslationMapPAE::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("X86VMTranslationMapPAE::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	VMAreaMappings queue;

	RecursiveLocker locker(fLock);

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearTableEntry(&pageTable[index]);
			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0)
				continue;

			T(Unmap(this, start, oldEntry));

			fMapCount--;

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0)
					page->accessed = true;
				if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical here, as in all cases this method is used, the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


void
X86VMTranslationMapPAE::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		X86VMTranslationMapPAE::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

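	// We only need to walk the page tables and clear the individual entries
	// (preserving their accessed/dirty flags) if the address space stays
	// alive or the top cache's page flags may not be ignored; otherwise the
	// entries for pages of the area's top cache can simply be left behind.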
	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			pae_page_directory_entry* pageDirEntry
				= X86PagingMethodPAE::PageDirEntryForAddress(
					fPagingStructures->VirtualPageDirs(), address);
			if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			pae_page_table_entry* pageTable
				= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
					*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearTableEntry(
					&pageTable[address / B_PAGE_SIZE
						% kPAEPageTableEntryCount]);

			pinner.Unlock();

			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			T(Unmap(this, address, oldEntry));

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		} else {
#if TRANSLATION_MAP_TRACING
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			ThreadCPUPinner pinner(thread_get_current_thread());

			pae_page_directory_entry* pageDirEntry
				= X86PagingMethodPAE::PageDirEntryForAddress(
					fPagingStructures->VirtualPageDirs(), address);
			if ((*pageDirEntry & X86_PAE_PDE_PRESENT) != 0) {
				pae_page_table_entry* pageTable
					= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
						*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
				pae_page_table_entry oldEntry = pageTable[
					address / B_PAGE_SIZE % kPAEPageTableEntryCount];

				pinner.Unlock();

				if ((oldEntry & X86_PAE_PTE_PRESENT) != 0)
					T(Unmap(this, address, oldEntry));
			}
#endif
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


status_t
X86VMTranslationMapPAE::Query(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physicalAddress = 0;

	// get the page directory entry
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	// get the page table entry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	pae_page_table_entry entry
		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];

	pinner.Unlock();

	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;

	// translate the page state flags
	if ((entry & X86_PAE_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA
			| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0);
	}

	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0
			? B_KERNEL_EXECUTE_AREA : 0)
		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	TRACE("X86VMTranslationMapPAE::Query(%#" B_PRIxADDR ") -> %#"
		B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);

	return B_OK;
}


status_t
X86VMTranslationMapPAE::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
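	// Like Query(), but safe to call with interrupts disabled: it uses the
	// interrupt-safe physical page mapper instead of pinning the current
	// thread to its CPU.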
	// default the flags to not present
	*_flags = 0;
	*_physicalAddress = 0;

	// get the page directory entry
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	// get the page table entry
	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)X86PagingMethodPAE::Method()
			->PhysicalPageMapper()->InterruptGetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	pae_page_table_entry entry
		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];

	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;

	// translate the page state flags
	if ((entry & X86_PAE_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA
			| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0);
	}

	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0
			? B_KERNEL_EXECUTE_AREA : 0)
		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	TRACE("X86VMTranslationMapPAE::Query(%#" B_PRIxADDR ") -> %#"
		B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);

	return B_OK;
}


status_t
X86VMTranslationMapPAE::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMapPAE::Protect(): %#" B_PRIxADDR " - %#" B_PRIxADDR
		", attributes: %#" B_PRIx32 "\n", start, end, attributes);

	// compute protection/memory type flags
	uint64 newFlags
		= X86PagingMethodPAE::MemoryTypeToPageTableEntryFlags(memoryType);
	if ((attributes & B_USER_PROTECTION) != 0) {
		newFlags |= X86_PAE_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newFlags |= X86_PAE_PTE_WRITABLE;
		if ((attributes & B_EXECUTE_AREA) == 0
			&& x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) {
			newFlags |= X86_PAE_PTE_NOT_EXECUTABLE;
		}
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newFlags |= X86_PAE_PTE_WRITABLE;

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			pae_page_table_entry entry = pageTable[index];
			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("X86VMTranslationMapPAE::Protect(): protect page %#"
				B_PRIxADDR "\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			pae_page_table_entry oldEntry;
			while (true) {
				oldEntry = X86PagingMethodPAE::TestAndSetTableEntry(
					&pageTable[index],
					(entry & ~(X86_PAE_PTE_PROTECTION_MASK
							| X86_PAE_PTE_MEMORY_TYPE_MASK))
						| newFlags,
					entry);
				if (oldEntry == entry)
					break;
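				// the entry changed concurrently (e.g. the CPU set the
				// accessed/dirty flag) -- retry with the updated value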
				entry = oldEntry;
			}

			T(Protect(this, start, entry,
				(entry & ~(X86_PAE_PTE_PROTECTION_MASK
						| X86_PAE_PTE_MEMORY_TYPE_MASK))
					| newFlags));

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


status_t
X86VMTranslationMapPAE::ClearFlags(addr_t address, uint32 flags)
{
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	uint64 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PAE_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? X86_PAE_PTE_ACCESSED : 0);

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pae_page_table_entry* entry
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
			+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;

	// clear out the flags we've been requested to clear
	pae_page_table_entry oldEntry
		= X86PagingMethodPAE::ClearTableEntryFlags(entry, flagsToClear);

	pinner.Unlock();

	T(ClearFlags(this, address, oldEntry, flagsToClear));

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(address);

	return B_OK;
}


bool
X86VMTranslationMapPAE::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	TRACE("X86VMTranslationMapPAE::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);

	RecursiveLocker locker(fLock);

	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	pae_page_table_entry* entry
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
			+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;

	// perform the deed
	pae_page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = *entry;
			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				return false;
			}

			if (oldEntry & X86_PAE_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = X86PagingMethodPAE::ClearTableEntryFlags(entry,
					X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
				T(ClearFlags(this, address, oldEntry,
					X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY));
				break;
			}

			// page hasn't been accessed -- unmap it
			if (X86PagingMethodPAE::TestAndSetTableEntry(entry, 0, oldEntry)
					== oldEntry) {
				T(ClearFlagsUnmap(this, address, oldEntry));
				break;
			}

			// something changed -- check again
		}
	} else {
		oldEntry = X86PagingMethodPAE::ClearTableEntryFlags(entry,
			X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
		T(ClearFlags(this, address, oldEntry,
			X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY));
	}

	pinner.Unlock();

	_modified = (oldEntry & X86_PAE_PTE_DIRTY) != 0;

	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then the entry could have been in any TLB.
		InvalidatePage(address);
		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
}


void
X86VMTranslationMapPAE::DebugPrintMappingInfo(addr_t virtualAddress)
{
	// get the page directory
	pae_page_directory_entry* const* pdpt
		= fPagingStructures->VirtualPageDirs();
	pae_page_directory_entry* pageDirectory = pdpt[virtualAddress >> 30];
	kprintf("page directory: %p (PDPT[%zu])\n", pageDirectory,
		virtualAddress >> 30);

	// get the page directory entry
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(pdpt, virtualAddress);
	kprintf("page directory entry %zu (%p): %#" B_PRIx64 "\n",
		pageDirEntry - pageDirectory, pageDirEntry, *pageDirEntry);

	kprintf("  access: ");
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) != 0)
		kprintf(" present");
	if ((*pageDirEntry & X86_PAE_PDE_WRITABLE) != 0)
		kprintf(" writable");
	if ((*pageDirEntry & X86_PAE_PDE_USER) != 0)
		kprintf(" user");
	if ((*pageDirEntry & X86_PAE_PDE_NOT_EXECUTABLE) == 0)
		kprintf(" executable");
	if ((*pageDirEntry & X86_PAE_PDE_LARGE_PAGE) != 0)
		kprintf(" large");

	kprintf("\n  caching:");
	if ((*pageDirEntry & X86_PAE_PDE_WRITE_THROUGH) != 0)
		kprintf(" write-through");
	if ((*pageDirEntry & X86_PAE_PDE_CACHING_DISABLED) != 0)
		kprintf(" uncached");

	kprintf("\n  flags:  ");
	if ((*pageDirEntry & X86_PAE_PDE_ACCESSED) != 0)
		kprintf(" accessed");
	kprintf("\n");

	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
		return;

	// get the page table entry
	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)X86PagingMethodPAE::Method()
			->PhysicalPageMapper()->InterruptGetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	kprintf("page table: %#" B_PRIx64 "\n",
		*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	size_t pteIndex = virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount;
	pae_page_table_entry entry = pageTable[pteIndex];
	kprintf("page table entry %zu (phys: %#" B_PRIx64 "): %#" B_PRIx64 "\n",
		pteIndex,
		(*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
			+ pteIndex * sizeof(pae_page_table_entry),
		entry);

	kprintf("  access: ");
	if ((entry & X86_PAE_PTE_PRESENT) != 0)
		kprintf(" present");
	if ((entry & X86_PAE_PTE_WRITABLE) != 0)
		kprintf(" writable");
	if ((entry & X86_PAE_PTE_USER) != 0)
		kprintf(" user");
	if ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0)
		kprintf(" executable");
	if ((entry & X86_PAE_PTE_GLOBAL) != 0)
		kprintf(" global");

	kprintf("\n  caching:");
	if ((entry & X86_PAE_PTE_WRITE_THROUGH) != 0)
		kprintf(" write-through");
	if ((entry & X86_PAE_PTE_CACHING_DISABLED) != 0)
		kprintf(" uncached");
	if ((entry & X86_PAE_PTE_PAT) != 0)
		kprintf(" PAT");

	kprintf("\n  flags:  ");
	if ((entry & X86_PAE_PTE_ACCESSED) != 0)
		kprintf(" accessed");
	if ((entry & X86_PAE_PTE_DIRTY) != 0)
		kprintf(" dirty");
	kprintf("\n");

	if ((entry & X86_PAE_PTE_PRESENT) != 0) {
		kprintf("  address: %#" B_PRIx64 "\n",
			entry & X86_PAE_PTE_ADDRESS_MASK);
	}
}


bool
X86VMTranslationMapPAE::DebugGetReverseMappingInfo(phys_addr_t physicalAddress,
	ReverseMappingInfoCallback& callback)
{
	pae_page_directory_entry* const* pdpt
		= fPagingStructures->VirtualPageDirs();
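	// a kernel map only owns the upper two page directories (the kernel half
	// of the address space), a user map only the lower two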
	for (uint32 pageDirIndex = fIsKernelMap ? 2 : 0;
		pageDirIndex < uint32(fIsKernelMap ? 4 : 2); pageDirIndex++) {
		// iterate through the page directory
		pae_page_directory_entry* pageDirectory = pdpt[pageDirIndex];
		for (uint32 pdeIndex = 0; pdeIndex < kPAEPageDirEntryCount;
			pdeIndex++) {
			pae_page_directory_entry& pageDirEntry = pageDirectory[pdeIndex];
			if ((pageDirEntry & X86_PAE_PDE_ADDRESS_MASK) == 0)
				continue;

			// get and iterate through the page table
			pae_page_table_entry* pageTable
				= (pae_page_table_entry*)X86PagingMethodPAE::Method()
					->PhysicalPageMapper()->InterruptGetPageTableAt(
						pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
			for (uint32 pteIndex = 0; pteIndex < kPAEPageTableEntryCount;
				pteIndex++) {
				pae_page_table_entry entry = pageTable[pteIndex];
				if ((entry & X86_PAE_PTE_PRESENT) != 0
					&& (entry & X86_PAE_PTE_ADDRESS_MASK) == physicalAddress) {
					addr_t virtualAddress = pageDirIndex * kPAEPageDirRange
						+ pdeIndex * kPAEPageTableRange
						+ pteIndex * B_PAGE_SIZE;
					if (callback.HandleVirtualAddress(virtualAddress))
						return true;
				}
			}
		}
	}

	return false;
}


X86PagingStructures*
X86VMTranslationMapPAE::PagingStructures() const
{
	return fPagingStructures;
}


#endif	// B_HAIKU_PHYSICAL_BITS == 64