/*
 * Copyright 2010-2012, François Revol, revol@free.fr.
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/040/M68KPagingMethod040.h"

#include <stdlib.h>
#include <string.h>

#include <AutoDeleter.h>

#include <arch_system_info.h>
#include <boot/kernel_args.h>
#include <int.h>
#include <thread.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "paging/040/M68KPagingStructures040.h"
#include "paging/040/M68KVMTranslationMap040.h"
#include "paging/m68k_physical_page_mapper.h"
#include "paging/m68k_physical_page_mapper_large_memory.h"


#define TRACE_M68K_PAGING_METHOD_040
#ifdef TRACE_M68K_PAGING_METHOD_040
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


/* Slots per pool for the physical page mapper.
 * m68k page tables are smaller than one page, but we allocate them at page
 * granularity anyway, so we just use a full page of entries per pool.
 */
#define SLOTS_PER_POOL	1024
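
// A quick sanity check of the sizing above (a sketch, not built; it assumes
// the 040's short 32-bit descriptors and 4K pages): 1024 slots map
// 1024 * 4KB = 4MB of temporary mappings, and the 1024 page_table_entry
// descriptors backing them fill exactly one page.
#if 0
STATIC_ASSERT(SLOTS_PER_POOL * sizeof(page_table_entry) == B_PAGE_SIZE);
#endif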

using M68KLargePhysicalPageMapper::PhysicalPageSlot;

// XXX: make it a class member
//static page_table_entry sQueryDesc __attribute__ (( aligned (4) ));
// XXX: REMOVEME
//static addr_t sIOSpaceBase;

// XXX: stuff it in the class
#warning M68K:REMOVE
static inline void
init_page_root_entry(page_root_entry *entry)
{
	// DT_INVALID is 0
	*entry = DFL_ROOTENT_VAL;
}


static inline void
update_page_root_entry(page_root_entry *entry, page_root_entry *with)
{
	// update page root entry atomically
	*entry = *with;
}


static inline void
init_page_directory_entry(page_directory_entry *entry)
{
	*entry = DFL_DIRENT_VAL;
}


static inline void
update_page_directory_entry(page_directory_entry *entry,
	page_directory_entry *with)
{
	// update page directory entry atomically
	*entry = *with;
}


static inline void
init_page_table_entry(page_table_entry *entry)
{
	*entry = DFL_PAGEENT_VAL;
}


static inline void
update_page_table_entry(page_table_entry *entry, page_table_entry *with)
{
	// update page table entry atomically
	// XXX: is it? (long desc?)
	*entry = *with;
}


static inline void
init_page_indirect_entry(page_indirect_entry *entry)
{
#warning M68K: is it correct ?
	*entry = DFL_PAGEENT_VAL;
}


static inline void
update_page_indirect_entry(page_indirect_entry *entry,
	page_indirect_entry *with)
{
	// update page indirect entry atomically
	// XXX: is it? (long desc?)
	*entry = *with;
}
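

// Note on the "atomically" comments above: these are short (single 32-bit
// word) descriptors, and an aligned long-word store is indivisible on one
// CPU, so an interrupt handler can never observe a half-written entry. The
// long two-word descriptor format would need more care, hence the XXXs.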


// #pragma mark - M68KPagingMethod040::PhysicalPageSlotPool


struct M68KPagingMethod040::PhysicalPageSlotPool
	: M68KLargePhysicalPageMapper::PhysicalPageSlotPool {
public:
	virtual						~PhysicalPageSlotPool();

			status_t			InitInitial(kernel_args* args);
			status_t			InitInitialPostArea(kernel_args* args);

			void				Init(area_id dataArea, void* data,
									area_id virtualArea, addr_t virtualBase);

	virtual	status_t			AllocatePool(
									M68KLargePhysicalPageMapper
										::PhysicalPageSlotPool*& _pool);
	virtual	void				Map(phys_addr_t physicalAddress,
									addr_t virtualAddress);

public:
	static	PhysicalPageSlotPool sInitialPhysicalPagePool;

private:
	area_id					fDataArea;
	area_id					fVirtualArea;
	addr_t					fVirtualBase;
	page_table_entry*		fPageTable;
};
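
// Memory layout of a pool (for reference): one data area holds the page
// table (one page, SLOTS_PER_POOL entries) immediately followed by the
// PhysicalPageSlot bookkeeping array; fVirtualBase is the start of the
// separate SLOTS_PER_POOL * B_PAGE_SIZE virtual range that page table maps.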


M68KPagingMethod040::PhysicalPageSlotPool
	M68KPagingMethod040::PhysicalPageSlotPool::sInitialPhysicalPagePool;


M68KPagingMethod040::PhysicalPageSlotPool::~PhysicalPageSlotPool()
{
}


status_t
M68KPagingMethod040::PhysicalPageSlotPool::InitInitial(kernel_args* args)
{
	// allocate a virtual address range for the pages to be mapped into
	addr_t virtualBase = vm_allocate_early(args, SLOTS_PER_POOL * B_PAGE_SIZE,
		0, 0, kPageTableAlignment);
	if (virtualBase == 0) {
		panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve "
			"physical page pool space in virtual address space!");
		return B_ERROR;
	}

	// allocate memory for the page table and data
	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[SLOTS_PER_POOL]);
	page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
		areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
	if (pageTable == NULL) {
		panic("LargeMemoryPhysicalPageMapper::Init(): Failed to allocate "
			"memory for the physical page pool!");
		return B_NO_MEMORY;
	}

	// prepare the page table
	_EarlyPreparePageTables(pageTable, virtualBase,
		SLOTS_PER_POOL * B_PAGE_SIZE);

	// init the pool structure and add the initial pool
	Init(-1, pageTable, -1, virtualBase);

	return B_OK;
}


status_t
M68KPagingMethod040::PhysicalPageSlotPool::InitInitialPostArea(
	kernel_args* args)
{
#warning M68K:WRITEME
	// create an area for the (already allocated) data
	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[SLOTS_PER_POOL]);
	void* temp = fPageTable;
	area_id area = create_area("physical page pool", &temp,
		B_EXACT_ADDRESS, areaSize, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK) {
		panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
			"create area for physical page pool.");
		return area;
	}
	fDataArea = area;

	// create an area for the virtual address space
	temp = (void*)fVirtualBase;
	area = vm_create_null_area(VMAddressSpace::KernelID(),
		"physical page pool space", &temp, B_EXACT_ADDRESS,
		SLOTS_PER_POOL * B_PAGE_SIZE, 0);
	if (area < B_OK) {
		panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
			"create area for physical page pool space.");
		return area;
	}
	fVirtualArea = area;

	return B_OK;
}


void
M68KPagingMethod040::PhysicalPageSlotPool::Init(area_id dataArea, void* data,
	area_id virtualArea, addr_t virtualBase)
{
	fDataArea = dataArea;
	fVirtualArea = virtualArea;
	fVirtualBase = virtualBase;
	fPageTable = (page_table_entry*)data;

	// init slot list
	fSlots = (PhysicalPageSlot*)(fPageTable + SLOTS_PER_POOL);
	addr_t slotAddress = virtualBase;
	for (int32 i = 0; i < SLOTS_PER_POOL; i++, slotAddress += B_PAGE_SIZE) {
		PhysicalPageSlot* slot = &fSlots[i];
		slot->next = slot + 1;
		slot->pool = this;
		slot->address = slotAddress;
	}

	fSlots[SLOTS_PER_POOL - 1].next = NULL;
		// terminate list
}
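

// Usage sketch (not built): how a slot from the free list built above is
// typically consumed. GetSlot()/PutSlot() are hypothetical accessors here;
// the real interface lives in m68k_physical_page_mapper_large_memory.
#if 0
	PhysicalPageSlot* slot = pool->GetSlot();	// hypothetical
	slot->Map(physicalAddress);
		// ends up in PhysicalPageSlotPool::Map() below
	memcpy(buffer, (void*)slot->address, B_PAGE_SIZE);
	pool->PutSlot(slot);						// hypothetical
#endif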


void
M68KPagingMethod040::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
	addr_t virtualAddress)
{
	page_table_entry& pte = fPageTable[
		(virtualAddress - fVirtualBase) / B_PAGE_SIZE];
	pte = TA_TO_PTEA(physicalAddress) | DT_PAGE
		| M68K_PTE_SUPERVISOR | M68K_PTE_GLOBAL;

	arch_cpu_invalidate_TLB_range(virtualAddress, virtualAddress);
}


status_t
M68KPagingMethod040::PhysicalPageSlotPool::AllocatePool(
	M68KLargePhysicalPageMapper::PhysicalPageSlotPool*& _pool)
{
	// create the pool structure
	PhysicalPageSlotPool* pool = new(std::nothrow) PhysicalPageSlotPool;
	if (pool == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool);

	// create an area that can contain the page table and the slot
	// structures
	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[SLOTS_PER_POOL]);
	void* data;
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
		PAGE_ALIGN(areaSize), B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions, &data);
	if (dataArea < 0)
		return dataArea;

	// create the null area for the virtual address space
	void* virtualBase;
	area_id virtualArea = vm_create_null_area(
		VMAddressSpace::KernelID(), "physical page pool space",
		&virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLOTS_PER_POOL * B_PAGE_SIZE,
		CREATE_AREA_PRIORITY_VIP);
	if (virtualArea < 0) {
		delete_area(dataArea);
		return virtualArea;
	}

	// prepare the page table
	memset(data, 0, B_PAGE_SIZE);

	// get the page table's physical address
	phys_addr_t physicalTable;
	M68KVMTranslationMap040* map = static_cast<M68KVMTranslationMap040*>(
		VMAddressSpace::Kernel()->TranslationMap());
	uint32 dummyFlags;
	cpu_status state = disable_interrupts();
	map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
	restore_interrupts(state);

#warning M68K:FIXME: insert *all* page tables!
	panic("M68KPagingMethod040::PhysicalPageSlotPool::AllocatePool(): "
		"not implemented yet");
#if 0
	// put the page table into the page directory
	int32 index = (addr_t)virtualBase / (B_PAGE_SIZE * SLOTS_PER_POOL);
	page_directory_entry* entry
		= &map->PagingStructures040()->pgdir_virt[index];
	PutPageTableInPageDir(entry, physicalTable,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	M68KPagingStructures040::UpdateAllPageDirs(index, *entry);
#endif

	// init the pool structure
	pool->Init(dataArea, data, virtualArea, (addr_t)virtualBase);
	poolDeleter.Detach();
	_pool = pool;
	return B_OK;
}


// #pragma mark - M68KPagingMethod040


M68KPagingMethod040::M68KPagingMethod040()
	:
	//fPageHole(NULL),
	//fPageHolePageDir(NULL),
	fKernelPhysicalPageRoot(0),
	fKernelVirtualPageRoot(NULL),
	fPhysicalPageMapper(NULL),
	fKernelPhysicalPageMapper(NULL)
{
}


M68KPagingMethod040::~M68KPagingMethod040()
{
}


status_t
M68KPagingMethod040::Init(kernel_args* args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("M68KPagingMethod040::Init(): entry\n");

#if 0	// XXX: we might actually need this trick to support the Milan
	// page hole set up in stage2
	fPageHole = (page_table_entry*)args->arch_args.page_hole;
	// calculate where the pgdir would be
	fPageHolePageDir = (page_directory_entry*)
		(((addr_t)args->arch_args.page_hole)
			+ (B_PAGE_SIZE * 1024 - B_PAGE_SIZE));
	// clear out the bottom 2 GB, unmap everything
	memset(fPageHolePageDir + FIRST_USER_PGDIR_ENT, 0,
		sizeof(page_directory_entry) * NUM_USER_PGDIR_ENTS);
#endif

	fKernelPhysicalPageRoot = (uint32)args->arch_args.phys_pgroot;
	fKernelVirtualPageRoot = (page_root_entry *)args->arch_args.vir_pgroot;

#ifdef TRACE_M68K_PAGING_METHOD_040
	//TRACE("page hole: %p, page dir: %p\n", fPageHole, fPageHolePageDir);
	TRACE("page root: %p (physical: %#" B_PRIx32 ")\n",
		fKernelVirtualPageRoot, fKernelPhysicalPageRoot);
#endif

	//sQueryDesc.type = DT_INVALID;

	M68KPagingStructures040::StaticInit();

	// create the initial pool for the physical page mapper
	PhysicalPageSlotPool* pool
		= new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
			PhysicalPageSlotPool;
	status_t error = pool->InitInitial(args);
	if (error != B_OK) {
		panic("M68KPagingMethod040::Init(): Failed to create initial pool "
			"for physical page mapper!");
		return error;
	}

	// create physical page mapper
	large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
		fKernelPhysicalPageMapper);
		// TODO: Select the best page mapper!

	TRACE("M68KPagingMethod040::Init(): done\n");

	*_physicalPageMapper = fPhysicalPageMapper;
	return B_OK;
}


status_t
M68KPagingMethod040::InitPostArea(kernel_args* args)
{
	TRACE("M68KPagingMethod040::InitPostArea(): entry\n");

	// now that the vm is initialized, create an area that represents
	// the page hole
	void *temp;
	status_t error;
	area_id area;

#if 0
	// unmap the page hole hack we were using before
	fKernelVirtualPageDirectory[1023] = 0;
	fPageHolePageDir = NULL;
	fPageHole = NULL;
#endif

	temp = (void*)fKernelVirtualPageRoot;
	area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, B_PAGE_SIZE,
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	error = PhysicalPageSlotPool::sInitialPhysicalPagePool
		.InitInitialPostArea(args);
	if (error != B_OK)
		return error;

	// this area is used for query_tmap_interrupt()
	// TODO: Note, this only works as long as all pages belong to the same
	//	page table, which is not yet enforced (or even tested)!
	// Note we don't support SMP, which makes things simpler.
#if 0	// XXX: do we need this anymore?
	area = vm_create_null_area(VMAddressSpace::KernelID(),
		"interrupt query pages", (void **)&queryPage, B_ANY_ADDRESS,
		B_PAGE_SIZE, 0);
	if (area < B_OK)
		return area;

	// insert the indirect descriptor in the tree so we can map the page
	// we want from it.
	//XXX...
#endif

	TRACE("M68KPagingMethod040::InitPostArea(): done\n");
	return B_OK;
}


status_t
M68KPagingMethod040::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
	M68KVMTranslationMap040* map = new(std::nothrow) M68KVMTranslationMap040;
	if (map == NULL)
		return B_NO_MEMORY;

	status_t error = map->Init(kernel);
	if (error != B_OK) {
		delete map;
		return error;
	}

	*_map = map;
	return B_OK;
}


status_t
M68KPagingMethod040::MapEarly(kernel_args* args, addr_t virtualAddress,
	phys_addr_t physicalAddress, uint8 attributes,
	phys_addr_t (*get_free_page)(kernel_args*))
{
	// XXX: horrible back door to map a page quickly regardless of translation
	// map object, etc. Used only during VM setup.
	// It uses a 'page hole' set up in the stage 2 bootloader: one of the
	// pgdir entries is pointed back at itself, effectively mapping the
	// contents of all of the 4MB of pagetables into a 4MB region. It's only
	// used here, and is later unmapped.

	addr_t va = virtualAddress;
	phys_addr_t pa = physicalAddress;
	page_root_entry *pr = (page_root_entry *)fKernelPhysicalPageRoot;
	page_directory_entry *pd;
	page_table_entry *pt;
	addr_t tbl;
	uint32 index;
	uint32 i;

	TRACE("040::MapEarly: entry pa 0x%lx va 0x%lx\n", pa, va);

	// everything is much simpler here because pa == va, thanks to the
	// transparent translation which hasn't been disabled yet
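	// For reference: with 4K pages the 68040 splits a 32-bit virtual address
	// 7/7/6/12 -- 128 root entries, 128 directory entries per table, 64 page
	// entries per table -- which is what VADDR_TO_PRENT, VADDR_TO_PDENT and
	// VADDR_TO_PTENT extract below. E.g. va 0x80003000 walks
	// pr[0x40] -> pd[0] -> pt[3].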

	index = VADDR_TO_PRENT(va);
	if (PRE_TYPE(pr[index]) != DT_ROOT) {
		unsigned aindex = index & ~(NUM_DIRTBL_PER_PAGE - 1); /* aligned */
		TRACE("missing page root entry %d ai %d\n", index, aindex);
		tbl = get_free_page(args) * B_PAGE_SIZE;
		if (!tbl)
			return ENOMEM;
		TRACE("040::MapEarly: asked for free page for pgdir. 0x%lx\n", tbl);
		// zero-out
		memset((void *)tbl, 0, B_PAGE_SIZE);
		// for each pgdir table on the allocated page:
		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
			PutPageDirInPageRoot(&pr[aindex + i], tbl, attributes);
			//TRACE("inserting tbl @ %p as %08x pr[%d] %08x\n", tbl,
			//	TA_TO_PREA(tbl), aindex + i, *(uint32 *)apr);
			// clear the table
			//TRACE("clearing table[%d]\n", i);
			pd = (page_directory_entry *)tbl;
			for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
				pd[j] = DFL_DIRENT_VAL;
			tbl += SIZ_DIRTBL;
		}
	}
	pd = (page_directory_entry *)PRE_TO_TA(pr[index]);

	index = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[index]) != DT_DIR) {
		unsigned aindex = index & ~(NUM_PAGETBL_PER_PAGE - 1); /* aligned */
		TRACE("missing page dir entry %d ai %d\n", index, aindex);
		tbl = get_free_page(args) * B_PAGE_SIZE;
		if (!tbl)
			return ENOMEM;
		TRACE("040::MapEarly: asked for free page for pgtable. 0x%lx\n", tbl);
		// zero-out
		memset((void *)tbl, 0, B_PAGE_SIZE);
		// for each page table on the allocated page:
		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
			PutPageTableInPageDir(&pd[aindex + i], tbl, attributes);
			// clear the table
			//TRACE("clearing table[%d]\n", i);
			pt = (page_table_entry *)tbl;
			for (int32 j = 0; j < NUM_PAGEENT_PER_TBL; j++)
				pt[j] = DFL_PAGEENT_VAL;
			tbl += SIZ_PAGETBL;
		}
	}
	pt = (page_table_entry *)PDE_TO_TA(pd[index]);

	index = VADDR_TO_PTENT(va);
	// now fill in the page table entry
	PutPageTableEntryInTable(&pt[index],
		physicalAddress, attributes, 0, IS_KERNEL_ADDRESS(virtualAddress));

	arch_cpu_invalidate_TLB_range(va, va);

	return B_OK;

#if 0
	// check to see if a page table exists for this range
	int index = VADDR_TO_PDENT(virtualAddress);
	if ((fPageHolePageDir[index] & M68K_PDE_PRESENT) == 0) {
		phys_addr_t pgtable;
		page_directory_entry *e;
		// we need to allocate a pgtable
		pgtable = get_free_page(args);
		// pgtable is in pages, convert to physical address
		pgtable *= B_PAGE_SIZE;

		TRACE("M68KPagingMethod040::MapEarly(): asked for free page for "
			"pgtable. %#" B_PRIxPHYSADDR "\n", pgtable);

		// put it in the pgdir
		e = &fPageHolePageDir[index];
		PutPageTableInPageDir(e, pgtable, attributes);

		// zero it out in its new mapping
		memset((unsigned int*)((addr_t)fPageHole
				+ (virtualAddress / B_PAGE_SIZE / 1024) * B_PAGE_SIZE),
			0, B_PAGE_SIZE);
	}

	ASSERT_PRINT(
		(fPageHole[virtualAddress / B_PAGE_SIZE] & M68K_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", pde: %#" B_PRIx32
		", existing pte: %#" B_PRIx32, virtualAddress, fPageHolePageDir[index],
		fPageHole[virtualAddress / B_PAGE_SIZE]);

	return B_OK;
#endif
}


bool
M68KPagingMethod040::IsKernelPageAccessible(addr_t virtualAddress,
	uint32 protection)
{
#warning M68K: WRITEME
	return false;
}


void
M68KPagingMethod040::SetPageRoot(uint32 pageRoot)
{
#warning M68K:TODO:override this for 060
	uint32 rp = pageRoot & ~((1 << 9) - 1);

	asm volatile(
		"movec %0,%%srp\n"
		"movec %0,%%urp\n"
		: : "d"(rp));
}
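

// Sketch (not built, an assumption): loading URP/SRP does not by itself
// flush the 040's address translation cache, so a caller switching address
// spaces would normally follow up with a full ATC flush, e.g.:
#if 0
	asm volatile("pflusha");
#endif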


/*static*/ void
M68KPagingMethod040::PutPageDirInPageRoot(page_root_entry* entry,
	phys_addr_t pgdirPhysical, uint32 attributes)
{
	*entry = TA_TO_PREA(pgdirPhysical)
		| DT_DIR;	// it's a resident entry pointing to a page directory

	// ToDo: we ignore the attributes of the page directory - for
	//	compatibility with BeOS we allow having user accessible areas in the
	//	kernel address space. This is currently being used by some drivers,
	//	mainly for the frame buffer. Our current real time data implementation
	//	makes use of this fact, too.
	//	We might want to get rid of this possibility one day, especially if
	//	we intend to port it to a platform that does not support this.
	//table.user = 1;
	//table.rw = 1;
}


/*static*/ void
M68KPagingMethod040::PutPageTableInPageDir(page_directory_entry* entry,
	phys_addr_t pgtablePhysical, uint32 attributes)
{
	*entry = TA_TO_PDEA(pgtablePhysical)
		| DT_DIR;	// it's a resident entry pointing to a page table
}


/*static*/ void
M68KPagingMethod040::PutPageTableEntryInTable(page_table_entry* entry,
	phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
	bool globalPage)
{
	page_table_entry page = TA_TO_PTEA(physicalAddress)
		| DT_PAGE
#ifdef PAGE_HAS_GLOBAL_BIT
		| (globalPage ? M68K_PTE_GLOBAL : 0)
#endif
		| MemoryTypeToPageTableEntryFlags(memoryType);

	// if the page is user accessible, it's automatically
	// accessible in kernel space, too (but with the same
	// protection)
	if ((attributes & B_USER_PROTECTION) == 0) {
		page |= M68K_PTE_SUPERVISOR;
		if ((attributes & B_KERNEL_WRITE_AREA) == 0)
			page |= M68K_PTE_READONLY;
	} else if ((attributes & B_WRITE_AREA) == 0)
		page |= M68K_PTE_READONLY;

	// put it in the page table
	*(volatile page_table_entry*)entry = page;
}
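
// Protection summary of the attribute mapping above (for reference):
//   B_USER_PROTECTION clear                    -> M68K_PTE_SUPERVISOR,
//     plus M68K_PTE_READONLY if B_KERNEL_WRITE_AREA is also clear
//   B_USER_PROTECTION set, B_WRITE_AREA clear  -> M68K_PTE_READONLY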


/*static*/ void
M68KPagingMethod040::_EarlyPreparePageTables(page_table_entry* pageTables,
	addr_t address, size_t size)
{
	memset(pageTables, 0, B_PAGE_SIZE *
		(size / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)));

	// Put the array of pgtables directly into the kernel page directories.
	// These will be wired and kept mapped into virtual space to be easy to
	// get to.
	// Note the bootloader allocates all page directories for us as a
	// contiguous block, and we still have transparent translation enabled,
	// so va == pa.
	{
		size_t index;
		addr_t virtualTable = (addr_t)pageTables;
		page_root_entry *pr
			= M68KPagingMethod040::Method()->fKernelVirtualPageRoot;
		page_directory_entry *pd;
		page_directory_entry *e;

		for (size_t i = 0; i < (size / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL));
				i++, virtualTable += SIZ_PAGETBL) {
			// early_query handles non-page-aligned addresses
			phys_addr_t physicalTable = 0;
			_EarlyQuery(virtualTable, &physicalTable);
			index = VADDR_TO_PRENT(address) + i / NUM_DIRENT_PER_TBL;
			pd = (page_directory_entry *)PRE_TO_TA(pr[index]);
			e = &pd[(VADDR_TO_PDENT(address) + i) % NUM_DIRENT_PER_TBL];
			PutPageTableInPageDir(e, physicalTable,
				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		}
	}
}
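

// For the initial pool this prepares 4MB / (4KB * NUM_PAGEENT_PER_TBL) = 16
// page tables, i.e. (assuming the 040's 256-byte short-format tables) exactly
// one page's worth of them, matching the one page reserved in InitInitial().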


//! TODO: currently assumes this translation map is active
/*static*/ status_t
M68KPagingMethod040::_EarlyQuery(addr_t virtualAddress,
	phys_addr_t *_physicalAddress)
{
	M68KPagingMethod040* method = M68KPagingMethod040::Method();

	page_root_entry *pr = method->fKernelVirtualPageRoot;
	page_directory_entry *pd;
	page_indirect_entry *pi;
	page_table_entry *pt;
	addr_t pa;
	int32 index;
	status_t err = B_ERROR;	// no pagetable here
	TRACE("%s(%#" B_PRIxADDR ")\n", __FUNCTION__, virtualAddress);

	// this is used before the vm is fully up; it relies on the
	// transparent translation of the first 256MB
	// as set up by the bootloader.

	index = VADDR_TO_PRENT(virtualAddress);
	TRACE("%s: pr[%d].type %d\n", __FUNCTION__, index,
		pr ? PRE_TYPE(pr[index]) : -1);
	if (pr && PRE_TYPE(pr[index]) == DT_ROOT) {
		pa = PRE_TO_TA(pr[index]);
		// pa == va when in TT
		// and no need to fiddle with cache
		pd = (page_directory_entry *)pa;

		index = VADDR_TO_PDENT(virtualAddress);
		TRACE("%s: pd[%d].type %d\n", __FUNCTION__, index,
			pd ? PDE_TYPE(pd[index]) : -1);
		if (pd && PDE_TYPE(pd[index]) == DT_DIR) {
			pa = PDE_TO_TA(pd[index]);
			pt = (page_table_entry *)pa;

			index = VADDR_TO_PTENT(virtualAddress);
			TRACE("%s: pt[%d].type %d\n", __FUNCTION__, index,
				pt ? PTE_TYPE(pt[index]) : -1);
			if (pt && PTE_TYPE(pt[index]) == DT_INDIRECT) {
				pi = (page_indirect_entry *)pt;
				pa = PIE_TO_TA(pi[index]);
				pt = (page_table_entry *)pa;
				index = 0; // single descriptor
			}

			if (pt && PTE_TYPE(pt[index]) == DT_PAGE) {
				*_physicalAddress = PTE_TO_PA(pt[index]);
				// we should only be passed page-aligned va, but just in case:
				*_physicalAddress += virtualAddress % B_PAGE_SIZE;
				err = B_OK;
			}
		}
	}

	return err;

#if 0
	int index = VADDR_TO_PDENT(virtualAddress);
	if ((method->PageHolePageDir()[index] & M68K_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_ERROR;
	}

	page_table_entry* entry = method->PageHole()
		+ virtualAddress / B_PAGE_SIZE;
	if ((*entry & M68K_PTE_PRESENT) == 0) {
		// page mapping not valid
		return B_ERROR;
	}

	*_physicalAddress = *entry & M68K_PTE_ADDRESS_MASK;
	return B_OK;
#endif
}