/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/460/PPCPagingMethod460.h"

#include <stdlib.h>
#include <string.h>

#include <new>

#include <AutoDeleter.h>

#include <arch/cpu.h>
#include <arch_mmu.h>
#include <arch_system_info.h>
#include <boot/kernel_args.h>
#include <int.h>
#include <thread.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "paging/460/PPCPagingStructures460.h"
#include "paging/460/PPCVMTranslationMap460.h"
#include "generic_vm_physical_page_mapper.h"
#include "generic_vm_physical_page_ops.h"
#include "GenericVMPhysicalPageMapper.h"


//#define TRACE_PPC_PAGING_METHOD_460
#ifdef TRACE_PPC_PAGING_METHOD_460
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif

// 64 MB of iospace
#define IOSPACE_SIZE (64*1024*1024)
// We only have small (4 KB) pages. The only reason for choosing a greater
// chunk size is to limit the wasted memory, since the generic page mapper
// allocates its bookkeeping structures per physical/virtual chunk.
// TODO: Implement a page mapper more suitable for small pages!
#define IOSPACE_CHUNK_SIZE (16 * B_PAGE_SIZE)

static addr_t sIOSpaceBase;


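/*!	Callback for the generic physical page mapper: maps the
	IOSPACE_CHUNK_SIZE bytes of physically contiguous memory starting at
	\a pa to the I/O space address \a va. Both addresses are truncated to
	page boundaries; \a va must lie within the I/O space window reserved in
	Init(). The \a flags argument is currently unused.
*/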
static status_t
map_iospace_chunk(addr_t va, phys_addr_t pa, uint32 flags)
{
	pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
	va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
	if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);

	// map the pages
	return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
}


// #pragma mark - PPCPagingMethod460


PPCPagingMethod460::PPCPagingMethod460()
/*
	:
	fPageHole(NULL),
	fPageHolePageDir(NULL),
	fKernelPhysicalPageDirectory(0),
	fKernelVirtualPageDirectory(NULL),
	fPhysicalPageMapper(NULL),
	fKernelPhysicalPageMapper(NULL)
*/
{
}


PPCPagingMethod460::~PPCPagingMethod460()
{
}


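/*!	Adopts the page table that the boot loader has set up
	(args->arch_args.page_table) and initializes the generic physical page
	mapper with our I/O space callback. Note that the hash mask computation
	below assumes the page table size is a power of two, as the PowerPC
	hashed page table architecture mandates.
*/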
status_t
PPCPagingMethod460::Init(kernel_args* args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("PPCPagingMethod460::Init(): entry\n");

	fPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
	fPageTableSize = args->arch_args.page_table.size;
	fPageTableHashMask = fPageTableSize / sizeof(page_table_entry_group) - 1;

	// init physical page mapper
	status_t error = generic_vm_physical_page_mapper_init(args,
		map_iospace_chunk, &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
	if (error != B_OK)
		return error;

	new(&fPhysicalPageMapper) GenericVMPhysicalPageMapper;

	*_physicalPageMapper = &fPhysicalPageMapper;
	return B_OK;

#if 0//X86
	fKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
	fKernelVirtualPageDirectory = (page_directory_entry*)(addr_t)
		args->arch_args.vir_pgdir;

#ifdef TRACE_PPC_PAGING_METHOD_460
	TRACE("page hole: %p, page dir: %p\n", fPageHole, fPageHolePageDir);
	TRACE("page dir: %p (physical: %#" B_PRIx32 ")\n",
		fKernelVirtualPageDirectory, fKernelPhysicalPageDirectory);
#endif

	PPCPagingStructures460::StaticInit();

	// create the initial pool for the physical page mapper
	PhysicalPageSlotPool* pool
		= new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
			PhysicalPageSlotPool;
	status_t error = pool->InitInitial(args);
	if (error != B_OK) {
		panic("PPCPagingMethod460::Init(): Failed to create initial pool "
			"for physical page mapper!");
		return error;
	}

	// create physical page mapper
	large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
		fKernelPhysicalPageMapper);
		// TODO: Select the best page mapper!

	// enable global page feature if available
	if (x86_check_feature(IA32_FEATURE_PGE, FEATURE_COMMON)) {
		// this prevents kernel pages from being flushed from TLB on
		// context-switch
		x86_write_cr4(x86_read_cr4() | IA32_CR4_GLOBAL_PAGES);
	}

	TRACE("PPCPagingMethod460::Init(): done\n");

	*_physicalPageMapper = fPhysicalPageMapper;
	return B_OK;
#endif
}


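/*!	Finishes initialization after the VM has been set up: remaps the page
	table into the kernel address space if the boot loader left it outside,
	creates an area wrapping it, and gives the generic physical page mapper
	the chance to create its own areas.
*/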
status_t
PPCPagingMethod460::InitPostArea(kernel_args* args)
{
	// If the page table doesn't lie within the kernel address space, we
	// remap it.
	if (!IS_KERNEL_ADDRESS(fPageTable)) {
		addr_t newAddress = (addr_t)fPageTable;
		status_t error = ppc_remap_address_range(&newAddress, fPageTableSize,
			false);
		if (error != B_OK) {
			panic("PPCPagingMethod460::InitPostArea(): Failed to remap "
				"the page table!");
			return error;
		}

		// set the new page table address
		addr_t oldVirtualBase = (addr_t)(fPageTable);
		fPageTable = (page_table_entry_group*)newAddress;

		// unmap the old pages
		ppc_unmap_address_range(oldVirtualBase, fPageTableSize);

// TODO: We should probably map the page table via BAT. It is relatively large,
// and due to being a hash table the access patterns might look sporadic, which
// certainly isn't to the liking of the TLB.
	}

	// create an area to cover the page table
	fPageTableArea = create_area("page_table", (void **)&fPageTable,
		B_EXACT_ADDRESS, fPageTableSize, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (fPageTableArea < B_OK)
		return fPageTableArea;

	// init physical page mapper
	status_t error = generic_vm_physical_page_mapper_init_post_area(args);
	if (error != B_OK)
		return error;

	return B_OK;

#if 0//X86
	// now that the vm is initialized, create an area that represents
	// the page hole
	void *temp;
	status_t error;
	area_id area;

	// unmap the page hole hack we were using before
	fKernelVirtualPageDirectory[1023] = 0;
	fPageHolePageDir = NULL;
	fPageHole = NULL;

	temp = (void*)fKernelVirtualPageDirectory;
	area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, B_PAGE_SIZE,
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	error = PhysicalPageSlotPool::sInitialPhysicalPagePool
		.InitInitialPostArea(args);
	if (error != B_OK)
		return error;

	return B_OK;
#endif//X86
}


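/*!	Creates a translation map for a new (kernel or user) address space and
	initializes it, handing ownership of the map to the caller.
*/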
status_t
PPCPagingMethod460::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
	PPCVMTranslationMap460* map = new(std::nothrow) PPCVMTranslationMap460;
	if (map == NULL)
		return B_NO_MEMORY;

	status_t error = map->Init(kernel);
	if (error != B_OK) {
		delete map;
		return error;
	}

	*_map = map;
	return B_OK;
}


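/*!	Maps a page during early boot, before the VM is fully up. Looks for a
	free slot in the page table entry group selected by the primary hash of
	(virtual segment ID, virtual address) and, if that group is full, in the
	group selected by the secondary hash, marking the entry accordingly.
	Fails with B_ERROR if both groups are occupied.
*/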
status_t
PPCPagingMethod460::MapEarly(kernel_args* args, addr_t virtualAddress,
	phys_addr_t physicalAddress, uint8 attributes,
	page_num_t (*get_free_page)(kernel_args*))
{
	uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;

	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		(uint32)virtualAddress);
	page_table_entry_group *group = &fPageTable[hash & fPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		// 8 entries in a group
		if (group->entry[i].valid)
			continue;

		FillPageTableEntry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, PTE_READ_WRITE, 0, false);
		return B_OK;
	}

	hash = page_table_entry::SecondaryHash(hash);
	group = &fPageTable[hash & fPageTableHashMask];

	for (int32 i = 0; i < 8; i++) {
		if (group->entry[i].valid)
			continue;

		FillPageTableEntry(&group->entry[i], virtualSegmentID,
			virtualAddress, physicalAddress, PTE_READ_WRITE, 0, true);
		return B_OK;
	}

	return B_ERROR;
}


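/*!	Returns whether the given kernel address is mapped with at least the
	given protection. A present mapping is considered kernel-readable; only
	the write permission is checked explicitly.
*/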
bool
PPCPagingMethod460::IsKernelPageAccessible(addr_t virtualAddress,
	uint32 protection)
{
	// TODO: Factor out to base class.
	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

//XXX:
//	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
//		addressSpace->TranslationMap());
//	VMTranslationMap* map = addressSpace->TranslationMap();
	PPCVMTranslationMap460* map = static_cast<PPCVMTranslationMap460*>(
		addressSpace->TranslationMap());

	phys_addr_t physicalAddress;
	uint32 flags;
	if (map->Query(virtualAddress, &physicalAddress, &flags) != B_OK)
		return false;

	if ((flags & PAGE_PRESENT) == 0)
		return false;

	// present means kernel-readable, so check for writable
	return (protection & B_KERNEL_WRITE_AREA) == 0
		|| (flags & B_KERNEL_WRITE_AREA) != 0;
}


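/*!	Fills in a page table entry in the order the hardware expects: the lower
	32 bits (physical page number, WIMG bits, protection) are written first,
	an eieio() makes them visible before the upper 32 bits set the valid bit,
	and a final ppc_sync() ensures the completed entry is visible to the MMU.
*/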
void
PPCPagingMethod460::FillPageTableEntry(page_table_entry *entry,
	uint32 virtualSegmentID, addr_t virtualAddress, phys_addr_t physicalAddress,
	uint8 protection, uint32 memoryType, bool secondaryHash)
{
	// lower 32 bits - set at once
	entry->physical_page_number = physicalAddress / B_PAGE_SIZE;
	entry->_reserved0 = 0;
	entry->referenced = false;
	entry->changed = false;
	entry->write_through = (memoryType == B_MTR_UC) || (memoryType == B_MTR_WT);
	entry->caching_inhibited = (memoryType == B_MTR_UC);
	entry->memory_coherent = false;
	entry->guarded = false;
	entry->_reserved1 = 0;
	entry->page_protection = protection & 0x3;
	eieio();
		// we need to make sure that the lower 32 bits have already been
		// written when the entry becomes valid

	// upper 32 bits
	entry->virtual_segment_id = virtualSegmentID;
	entry->secondary_hash = secondaryHash;
	entry->abbr_page_index = (virtualAddress >> 22) & 0x3f;
	entry->valid = true;

	ppc_sync();
}


#if 0//X86
/*static*/ void
PPCPagingMethod460::PutPageTableInPageDir(page_directory_entry* entry,
	phys_addr_t pgtablePhysical, uint32 attributes)
{
	*entry = (pgtablePhysical & PPC_PDE_ADDRESS_MASK)
		| PPC_PDE_PRESENT
		| PPC_PDE_WRITABLE
		| PPC_PDE_USER;
		// TODO: we ignore the attributes of the page table - for compatibility
		// with BeOS we allow having user accessible areas in the kernel address
		// space. This is currently being used by some drivers, mainly for the
		// frame buffer. Our current real time data implementation makes use of
		// this fact, too.
		// We might want to get rid of this possibility one day, especially if
		// we intend to port it to a platform that does not support this.
}


/*static*/ void
PPCPagingMethod460::PutPageTableEntryInTable(page_table_entry* entry,
	phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
	bool globalPage)
{
	page_table_entry page = (physicalAddress & PPC_PTE_ADDRESS_MASK)
		| PPC_PTE_PRESENT | (globalPage ? PPC_PTE_GLOBAL : 0)
		| MemoryTypeToPageTableEntryFlags(memoryType);

	// if the page is user accessible, it's automatically
	// accessible in kernel space, too (but with the same
	// protection)
	if ((attributes & B_USER_PROTECTION) != 0) {
		page |= PPC_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			page |= PPC_PTE_WRITABLE;
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		page |= PPC_PTE_WRITABLE;

	// put it in the page table
	*(volatile page_table_entry*)entry = page;
}


/*static*/ void
PPCPagingMethod460::_EarlyPreparePageTables(page_table_entry* pageTables,
	addr_t address, size_t size)
{
	memset(pageTables, 0, B_PAGE_SIZE * (size / (B_PAGE_SIZE * 1024)));

	// put the array of pgtables directly into the kernel pagedir
	// these will be wired and kept mapped into virtual space to be easy to get
	// to
	{
		addr_t virtualTable = (addr_t)pageTables;

		page_directory_entry* pageHolePageDir
			= PPCPagingMethod460::Method()->PageHolePageDir();

		for (size_t i = 0; i < (size / (B_PAGE_SIZE * 1024));
				i++, virtualTable += B_PAGE_SIZE) {
			phys_addr_t physicalTable = 0;
			_EarlyQuery(virtualTable, &physicalTable);
			page_directory_entry* entry = &pageHolePageDir[
				(address / (B_PAGE_SIZE * 1024)) + i];
			PutPageTableInPageDir(entry, physicalTable,
				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		}
	}
}


//! TODO: currently assumes this translation map is active
/*static*/ status_t
PPCPagingMethod460::_EarlyQuery(addr_t virtualAddress,
	phys_addr_t *_physicalAddress)
{
	PPCPagingMethod460* method = PPCPagingMethod460::Method();
	int index = VADDR_TO_PDENT(virtualAddress);
	if ((method->PageHolePageDir()[index] & PPC_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_ERROR;
	}

	page_table_entry* entry = method->PageHole() + virtualAddress / B_PAGE_SIZE;
	if ((*entry & PPC_PTE_PRESENT) == 0) {
		// page mapping not valid
		return B_ERROR;
	}

	*_physicalAddress = *entry & PPC_PTE_ADDRESS_MASK;
	return B_OK;
}
#endif