/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/pae/X86VMTranslationMapPAE.h"

#include <int.h>
#include <slab/Slab.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/pae/X86PagingMethodPAE.h"
#include "paging/pae/X86PagingStructuresPAE.h"
#include "paging/x86_physical_page_mapper.h"


//#define TRACE_X86_VM_TRANSLATION_MAP_PAE
#ifdef TRACE_X86_VM_TRANSLATION_MAP_PAE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


#if B_HAIKU_PHYSICAL_BITS == 64


X86VMTranslationMapPAE::X86VMTranslationMapPAE()
	:
	fPagingStructures(NULL)
{
}


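/*!	Frees all userland page tables of this translation map and drops the
	reference to its paging structures.
*/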
X86VMTranslationMapPAE::~X86VMTranslationMapPAE()
{
	if (fPagingStructures == NULL)
		return;

	if (fPageMapper != NULL)
		fPageMapper->Delete();

	// cycle through and free all of the user space page tables

	STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);
		// assuming 1-1 split of the address space

	for (uint32 k = 0; k < 2; k++) {
		pae_page_directory_entry* pageDir
			= fPagingStructures->VirtualPageDirs()[k];
		if (pageDir == NULL)
			continue;

		for (uint32 i = 0; i < kPAEPageDirEntryCount; i++) {
			if ((pageDir[i] & X86_PAE_PDE_PRESENT) != 0) {
				phys_addr_t address = pageDir[i] & X86_PAE_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (page == NULL)
					panic("X86VMTranslationMapPAE::~X86VMTranslationMapPAE: "
						"didn't find page table page: page address: %#"
						B_PRIxPHYSADDR ", virtual base: %#" B_PRIxADDR "\n",
						address,
						(k * kPAEPageDirEntryCount + i) * kPAEPageTableRange);
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_set_state(page, PAGE_STATE_FREE);
			}
		}
	}

	fPagingStructures->RemoveReference();
}


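/*!	Initializes the translation map for the kernel or a userland address
	space. For the kernel the already existing kernel paging structures are
	reused; for userland two new page directories are allocated and combined
	with the upper two kernel page directories in a newly allocated PDPT.
*/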
status_t
X86VMTranslationMapPAE::Init(bool kernel)
{
	TRACE("X86VMTranslationMapPAE::Init()\n");

	X86VMTranslationMap::Init(kernel);

	fPagingStructures = new(std::nothrow) X86PagingStructuresPAE;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	X86PagingMethodPAE* method = X86PagingMethodPAE::Method();

	if (kernel) {
		// kernel
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();

		// we already know the kernel pgdir mapping
		fPagingStructures->Init(method->KernelVirtualPageDirPointerTable(),
			method->KernelPhysicalPageDirPointerTable(), NULL,
			method->KernelVirtualPageDirs(), method->KernelPhysicalPageDirs());
	} else {
		// user
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;

		// The following code assumes that the kernel address space occupies
		// the upper half of the virtual address space. This simplifies
		// things a lot, since it allows us to just use the upper two page
		// directories of the kernel and create two new lower page
		// directories for the userland.
		STATIC_ASSERT(KERNEL_BASE == 0x80000000 && KERNEL_SIZE == 0x80000000);

		// allocate the page directories (both at once)
		pae_page_directory_entry* virtualPageDirs[4];
		phys_addr_t physicalPageDirs[4];
		virtualPageDirs[0] = (pae_page_directory_entry*)memalign(B_PAGE_SIZE,
			2 * B_PAGE_SIZE);
		if (virtualPageDirs[0] == NULL)
			return B_NO_MEMORY;
		virtualPageDirs[1] = virtualPageDirs[0] + kPAEPageTableEntryCount;

		// clear the userland page directories
		memset(virtualPageDirs[0], 0, 2 * B_PAGE_SIZE);

		// use the upper two kernel page directories
		for (int32 i = 2; i < 4; i++) {
			virtualPageDirs[i] = method->KernelVirtualPageDirs()[i];
			physicalPageDirs[i] = method->KernelPhysicalPageDirs()[i];
		}

		// look up the page directories' physical addresses
		for (int32 i = 0; i < 2; i++) {
			vm_get_page_mapping(VMAddressSpace::KernelID(),
				(addr_t)virtualPageDirs[i], &physicalPageDirs[i]);
		}

		// allocate the PDPT -- needs to have a 32 bit physical address
		phys_addr_t physicalPDPT;
		void* pdptHandle;
		pae_page_directory_pointer_table_entry* pdpt
			= (pae_page_directory_pointer_table_entry*)
				method->Allocate32BitPage(physicalPDPT, pdptHandle);
		if (pdpt == NULL) {
			free(virtualPageDirs[0]);
			return B_NO_MEMORY;
		}

		// init the PDPT entries
		for (int32 i = 0; i < 4; i++) {
			pdpt[i] = (physicalPageDirs[i] & X86_PAE_PDPTE_ADDRESS_MASK)
				| X86_PAE_PDPTE_PRESENT;
		}

		// init the paging structures
		fPagingStructures->Init(pdpt, physicalPDPT, pdptHandle, virtualPageDirs,
			physicalPageDirs);
	}

	return B_OK;
}


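/*!	Returns the maximum number of page tables that might need to be allocated
	in order to map the given virtual address range.
*/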
size_t
X86VMTranslationMapPAE::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// If start == 0, the actual base address is not yet known to the caller
	// and we shall assume the worst case.
	if (start == 0) {
		// offset the range so it has the worst possible alignment
		start = kPAEPageTableRange - B_PAGE_SIZE;
		end += kPAEPageTableRange - B_PAGE_SIZE;
	}

	return end / kPAEPageTableRange + 1 - start / kPAEPageTableRange;
}


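/*!	Maps the given physical page at the given virtual address with the
	requested protection attributes and memory type. If no page table exists
	for the address yet, one is allocated from the given reservation.
*/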
status_t
X86VMTranslationMapPAE::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("X86VMTranslationMapPAE::Map(): %#" B_PRIxADDR " -> %#" B_PRIxPHYSADDR
		"\n", virtualAddress, physicalAddress);

	// check to see if a page table exists for this range
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// we need to allocate a page table
		vm_page *page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		phys_addr_t physicalPageTable
			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("X86VMTranslationMapPAE::Map(): asked for free page for "
			"page table: %#" B_PRIxPHYSADDR "\n", physicalPageTable);

		// put it in the page dir
		X86PagingMethodPAE::PutPageTableInPageDir(pageDirEntry,
			physicalPageTable,
			attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

		fMapCount++;
	}

	// now, fill in the page table entry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	pae_page_table_entry* entry = pageTable
		+ virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount;

	ASSERT_PRINT((*entry & X86_PAE_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
		virtualAddress, *entry);

	X86PagingMethodPAE::PutPageTableEntryInTable(entry, physicalAddress,
		attributes, memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as
	// previously the entry was not present and the TLB doesn't cache those
	// entries.

	fMapCount++;

	return B_OK;
}


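/*!	Unmaps the given virtual address range by clearing the present flag of
	the affected page table entries. The page tables themselves are left in
	place.
*/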
status_t
X86VMTranslationMapPAE::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMapPAE::Unmap(): %#" B_PRIxADDR " - %#" B_PRIxADDR
		"\n", start, end);

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("X86VMTranslationMapPAE::Unmap(): removing page %#"
				B_PRIxADDR "\n", start);

			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearPageTableEntryFlags(
					&pageTable[index], X86_PAE_PTE_PRESENT);
			fMapCount--;

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then could the entry
				// have been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


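/*!	Debugging helper that sets or clears the present flag of all page table
	entries in the given range without updating any other mapping state.
*/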
status_t
X86VMTranslationMapPAE::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {

			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
				if (!markPresent)
					continue;

				X86PagingMethodPAE::SetPageTableEntryFlags(
					&pageTable[index], X86_PAE_PTE_PRESENT);
			} else {
				if (markPresent)
					continue;

				pae_page_table_entry oldEntry
					= X86PagingMethodPAE::ClearPageTableEntryFlags(
						&pageTable[index], X86_PAE_PTE_PRESENT);

				if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
					// Note that we only need to invalidate the address if
					// the accessed flag was set, since only then could the
					// entry have been in any TLB.
					InvalidatePage(start);
				}
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
X86VMTranslationMapPAE::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);

	TRACE("X86VMTranslationMapPAE::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

	pae_page_table_entry oldEntry = X86PagingMethodPAE::ClearPageTableEntry(
		&pageTable[address / B_PAGE_SIZE % kPAEPageTableEntryCount]);

	pinner.Unlock();

	if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
		// page mapping not valid
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;

	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then could the entry have been in any
		// TLB.
		InvalidatePage(address);

		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of the
		// same team) could still access the page in question via their cached
		// entry. We can obviously lose a modified flag in this case, with the
		// effect that the page looks unmodified (and might thus be recycled),
		// but is actually modified.
		// In most cases this is harmless, but for vm_remove_all_page_mappings()
		// this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		(oldEntry & X86_PAE_PTE_ACCESSED) != 0,
		(oldEntry & X86_PAE_PTE_DIRTY) != 0, updatePageQueue);

	return B_OK;
}


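/*!	Unmaps all pages in the given address range of the area. For non-device
	areas the accessed and dirty flags of the cleared page table entries are
	transferred to the respective vm_pages and the mapping objects are
	removed and freed.
*/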
void
X86VMTranslationMapPAE::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("X86VMTranslationMapPAE::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	VMAreaMappings queue;

	RecursiveLocker locker(fLock);

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearPageTableEntry(&pageTable[index]);
			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0)
				continue;

			fMapCount--;

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then could the entry
				// have been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0)
					page->accessed = true;
				if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical here, as in all cases this method is used, the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


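/*!	Unmaps the complete area. Unlike UnmapPages() this iterates over the
	area's mapping objects instead of the address range, which is cheaper for
	sparsely mapped areas. Device and wired areas are delegated to
	UnmapPages().
*/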
void
X86VMTranslationMapPAE::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		X86VMTranslationMapPAE::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			pae_page_directory_entry* pageDirEntry
				= X86PagingMethodPAE::PageDirEntryForAddress(
					fPagingStructures->VirtualPageDirs(), address);
			if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			pae_page_table_entry* pageTable
				= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
					*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
			pae_page_table_entry oldEntry
				= X86PagingMethodPAE::ClearPageTableEntry(
					&pageTable[address / B_PAGE_SIZE
						% kPAEPageTableEntryCount]);

			pinner.Unlock();

			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}

			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if ((oldEntry & X86_PAE_PTE_DIRTY) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


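/*!	Looks up the mapping for the given virtual address and returns its
	physical address as well as its protection, accessed, dirty, and present
	flags. If no page table exists for the address, both values are left 0.
*/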
status_t
X86VMTranslationMapPAE::Query(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physicalAddress = 0;

	// get the page directory entry
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	// get the page table entry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
			*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	pae_page_table_entry entry
		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];

	pinner.Unlock();

	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;

	// translate the page state flags
	if ((entry & X86_PAE_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	TRACE("X86VMTranslationMapPAE::Query(%#" B_PRIxADDR ") -> %#"
		B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);

	return B_OK;
}


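/*!	Version of Query() that can be used in interrupt context. It uses the
	physical page mapper's interrupt-safe page table lookup and therefore
	doesn't need to pin the current thread.
*/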
status_t
X86VMTranslationMapPAE::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physicalAddress = 0;

	// get the page directory entry
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), virtualAddress);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	// get the page table entry
	pae_page_table_entry* pageTable
		= (pae_page_table_entry*)X86PagingMethodPAE::Method()
			->PhysicalPageMapper()->InterruptGetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);
	pae_page_table_entry entry
		= pageTable[virtualAddress / B_PAGE_SIZE % kPAEPageTableEntryCount];

	*_physicalAddress = entry & X86_PAE_PTE_ADDRESS_MASK;

	// translate the page state flags
	if ((entry & X86_PAE_PTE_USER) != 0) {
		*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	TRACE("X86VMTranslationMapPAE::QueryInterrupt(%#" B_PRIxADDR ") -> %#"
		B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);

	return B_OK;
}


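/*!	Changes the protection and memory type of all mapped pages in the given
	range. Each page table entry is updated atomically, so its accessed and
	dirty flags are preserved.
*/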
status_t
X86VMTranslationMapPAE::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("X86VMTranslationMapPAE::Protect(): %#" B_PRIxADDR " - %#" B_PRIxADDR
		", attributes: %#" B_PRIx32 "\n", start, end, attributes);

	// compute protection flags
	uint64 newProtectionFlags = 0;
	if ((attributes & B_USER_PROTECTION) != 0) {
		newProtectionFlags = X86_PAE_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newProtectionFlags |= X86_PAE_PTE_WRITABLE;
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newProtectionFlags = X86_PAE_PTE_WRITABLE;

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			pae_page_table_entry entry = pageTable[index];
			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("X86VMTranslationMapPAE::Protect(): protect page %#"
				B_PRIxADDR "\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			pae_page_table_entry oldEntry;
			while (true) {
				oldEntry = X86PagingMethodPAE::TestAndSetPageTableEntry(
					&pageTable[index],
					(entry & ~(X86_PAE_PTE_PROTECTION_MASK
							| X86_PAE_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags
						| X86PagingMethodPAE::MemoryTypeToPageTableEntryFlags(
							memoryType),
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then could the entry
				// have been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


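/*!	Clears the accessed and/or dirty flag of the page table entry mapping
	the given address, as selected by the PAGE_ACCESSED and PAGE_MODIFIED
	bits in flags, and invalidates the TLB entry if one of them was set.
*/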
status_t
X86VMTranslationMapPAE::ClearFlags(addr_t address, uint32 flags)
{
	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);
	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	uint64 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PAE_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? X86_PAE_PTE_ACCESSED : 0);

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pae_page_table_entry* entry
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
			+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;

	// clear out the flags we've been requested to clear
	pae_page_table_entry oldEntry
		= X86PagingMethodPAE::ClearPageTableEntryFlags(entry, flagsToClear);

	pinner.Unlock();

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(address);

	return B_OK;
}


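/*!	Clears the accessed and dirty flags of the page table entry mapping the
	given address. If unmapIfUnaccessed is true and the accessed flag wasn't
	set, the page is unmapped instead. Returns whether the page had been
	accessed; the dirty state is returned via _modified.
*/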
bool
X86VMTranslationMapPAE::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	TRACE("X86VMTranslationMapPAE::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	pae_page_directory_entry* pageDirEntry
		= X86PagingMethodPAE::PageDirEntryForAddress(
			fPagingStructures->VirtualPageDirs(), address);

	RecursiveLocker locker(fLock);

	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	pae_page_table_entry* entry
		= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK)
			+ address / B_PAGE_SIZE % kPAEPageTableEntryCount;

	// perform the deed
	pae_page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = *entry;
			if ((oldEntry & X86_PAE_PTE_PRESENT) == 0) {
				// page mapping not valid
				return false;
			}

			if (oldEntry & X86_PAE_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = X86PagingMethodPAE::ClearPageTableEntryFlags(entry,
					X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
				break;
			}

			// page hasn't been accessed -- unmap it
			if (X86PagingMethodPAE::TestAndSetPageTableEntry(entry, 0, oldEntry)
					== oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
		oldEntry = X86PagingMethodPAE::ClearPageTableEntryFlags(entry,
			X86_PAE_PTE_ACCESSED | X86_PAE_PTE_DIRTY);
	}

	pinner.Unlock();

	_modified = (oldEntry & X86_PAE_PTE_DIRTY) != 0;

	if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then could the entry have been in any
		// TLB.
		InvalidatePage(address);
		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & X86_PAE_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
}


X86PagingStructures*
X86VMTranslationMapPAE::PagingStructures() const
{
	return fPagingStructures;
}


#endif	// B_HAIKU_PHYSICAL_BITS == 64