1/*
 * Copyright 2003-2009, Axel Dörfler, axeld@pinc-software.de.
3 * Copyright 2010-2011, Haiku, Inc. All Rights Reserved.
4 * All rights reserved. Distributed under the terms of the MIT License.
5 *
6 * Authors:
 *		Axel Dörfler, axeld@pinc-software.de.
8 *		Alexander von Gluck, kallisti5@unixzen.com
9 */
10
11
12#include <OS.h>
13
14#include <platform_arch.h>
15#include <boot/addr_range.h>
16#include <boot/kernel_args.h>
17#include <boot/platform.h>
18#include <boot/stage2.h>
19#include <boot/stdio.h>
20#include <platform/openfirmware/openfirmware.h>
21#include <arch_cpu.h>
22#include <arch_mmu.h>
23#include <kernel.h>
24
25#include "support.h"
26
27
28#define PAGE_READ_ONLY	0x0002
29#define PAGE_READ_WRITE	0x0001
30
31// NULL is actually a possible physical address, so use -1 (which is
32// misaligned, so not a valid address) as the invalid physical address.
33#define PHYSINVAL ((void *)-1)
34//#define PHYSINVAL NULL
35
36//#define TRACE_MMU
37#ifdef TRACE_MMU
38#   define TRACE(x...) dprintf(x)
39#else
40#   define TRACE(x...) ;
41#endif
42
43
44unsigned int sMmuInstance;
45unsigned int sMemoryInstance;
46
47
48// begin and end of the boot loader
49extern "C" uint8 __text_begin;
50extern "C" uint8 _end;
51
52
53static status_t
54insert_virtual_range_to_keep(void *start, uint32 size)
55{
56	return insert_address_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
57		&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
58		MAX_VIRTUAL_RANGES_TO_KEEP, (addr_t)start, size);
59}
60
61
62static status_t
63remove_virtual_range_to_keep(void *start, uint32 size)
64{
65	return remove_address_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
66		&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
67		MAX_VIRTUAL_RANGES_TO_KEEP, (addr_t)start, size);
68}
69
70
/*!	Queries OpenFirmware for the installed RAM regions and records each one
	via insert_physical_memory_range(). On success, \a total holds the sum of
	all region sizes in bytes.
	Returns B_OK on success, B_ERROR if the cell layout is unsupported or the
	"reg" property cannot be read.
*/
static status_t
find_physical_memory_ranges(size_t &total)
{
	TRACE("checking for memory...\n");
	intptr_t package = of_instance_to_package(sMemoryInstance);

	total = 0;

	// Memory base addresses are provided in 32 or 64 bit flavors
	// #address-cells and #size-cells matches the number of 32-bit 'cells'
	// representing the length of the base address and size fields
	intptr_t root = of_finddevice("/");
	int32 regSizeCells = of_size_cells(root);
	if (regSizeCells == OF_FAILED) {
		dprintf("finding size of memory cells failed, assume 32-bit.\n");
		regSizeCells = 1;
	}

	int32 regAddressCells = of_address_cells(root);
	if (regAddressCells == OF_FAILED) {
		// Sun Netra T1-105 is missing this, but we can guess that if the size
		// is 64bit, the address also likely is.
		regAddressCells = regSizeCells;
	}

	// Only the 2-cell/2-cell (64-bit address, 64-bit size) layout matches the
	// of_region<uint64, uint64> template used below; bail out on anything else.
	if (regAddressCells != 2 || regSizeCells != 2) {
		panic("%s: Unsupported OpenFirmware cell count detected.\n"
		"Address Cells: %" B_PRId32 "; Size Cells: %" B_PRId32
		" (CPU > 64bit?).\n", __func__, regAddressCells, regSizeCells);
		return B_ERROR;
	}

	// static: keeps this 1KB table off the (small) boot loader stack.
	static struct of_region<uint64, uint64> regions[64];
	int count = of_getprop(package, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		count = of_getprop(sMemoryInstance, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		return B_ERROR;
	count /= sizeof(regions[0]);

	for (int32 i = 0; i < count; i++) {
		// Zero-sized entries are legal padding in the "reg" property.
		if (regions[i].size <= 0) {
			TRACE("%d: empty region\n", i);
			continue;
		}
		TRACE("%" B_PRIu32 ": base = %" B_PRIx64 ","
			"size = %" B_PRIx64 "\n", i, regions[i].base, regions[i].size);

		total += regions[i].size;

		if (insert_physical_memory_range((addr_t)regions[i].base,
				regions[i].size) != B_OK) {
			dprintf("cannot map physical memory range "
				"(num ranges = %" B_PRIu32 ")!\n",
				gKernelArgs.num_physical_memory_ranges);
			return B_ERROR;
		}
	}

	return B_OK;
}
132
133
134static bool
135is_virtual_allocated(void *address, size_t size)
136{
137	uint64 foundBase;
138	return !get_free_address_range(gKernelArgs.virtual_allocated_range,
139		gKernelArgs.num_virtual_allocated_ranges, (addr_t)address, size,
140		&foundBase) || foundBase != (addr_t)address;
141}
142
143
144static bool
145is_physical_allocated(void *address, size_t size)
146{
147	uint64 foundBase;
148	return !get_free_address_range(gKernelArgs.physical_allocated_range,
149		gKernelArgs.num_physical_allocated_ranges, (addr_t)address, size,
150		&foundBase) || foundBase != (addr_t)address;
151}
152
153
154static bool
155is_physical_memory(void *address, size_t size = 1)
156{
157	return is_address_range_covered(gKernelArgs.physical_memory_range,
158		gKernelArgs.num_physical_memory_ranges, (addr_t)address, size);
159}
160
161
/*!	Asks the OpenFirmware MMU to map \a size bytes at \a virtualAddress onto
	\a physicalAddress with protection \a mode (PAGE_READ_ONLY or
	PAGE_READ_WRITE). Returns true on success.
*/
static bool
map_range(void *virtualAddress, void *physicalAddress, size_t size, uint16 mode)
{
	// OF "map" method takes 5 in-arguments in this order: mode, size,
	// virtual address, then the physical address — presumably as a
	// (high, low) cell pair with high == 0 here; TODO confirm against the
	// IEEE 1275 MMU package binding.
	int status = of_call_method(sMmuInstance, "map", 5, 0, (uint64)mode, size,
		virtualAddress, 0, physicalAddress);

	if (status != 0) {
		dprintf("map_range(base: %p, size: %" B_PRIuSIZE ") "
			"mapping failed\n", virtualAddress, size);
		return false;
	}

	return true;
}
177
178
179static status_t
180find_allocated_ranges(void **_exceptionHandlers)
181{
182	// we have to preserve the OpenFirmware established mappings
183	// if we want to continue to use its service after we've
184	// taken over (we will probably need less translations once
185	// we have proper driver support for the target hardware).
186	intptr_t mmu = of_instance_to_package(sMmuInstance);
187
188	static struct translation_map {
189		void *PhysicalAddress() {
190			int64_t p = data;
191#if 0
192			// The openboot own "map?" word does not do this, so it must not
193			// be needed
194			// Sign extend
195			p <<= 23;
196			p >>= 23;
197#endif
198
199			// Keep only PA[40:13]
200			// FIXME later CPUs have some more bits here
201			p &= 0x000001FFFFFFE000ll;
202
203			return (void*)p;
204		}
205
206		int16_t Mode() {
207			int16_t mode;
208			if (data & 2)
209				mode = PAGE_READ_WRITE;
210			else
211				mode = PAGE_READ_ONLY;
212			return mode;
213		}
214
215		void	*virtual_address;
216		intptr_t length;
217		intptr_t data;
218	} translations[64];
219
220	int length = of_getprop(mmu, "translations", &translations,
221		sizeof(translations));
222	if (length == OF_FAILED) {
223		dprintf("Error: no OF translations.\n");
224		return B_ERROR;
225	}
226	length = length / sizeof(struct translation_map);
227	uint32 total = 0;
228	TRACE("found %d translations\n", length);
229
230	for (int i = 0; i < length; i++) {
231		struct translation_map *map = &translations[i];
232		bool keepRange = true;
233		TRACE("%i: map: %p, length %ld -> phy %p mode %d: ", i,
234			map->virtual_address, map->length,
235			map->PhysicalAddress(), map->Mode());
236
237		// insert range in physical allocated, if it points to physical memory
238
239		if (is_physical_memory(map->PhysicalAddress())
240			&& insert_physical_allocated_range((addr_t)map->PhysicalAddress(),
241				map->length) != B_OK) {
242			dprintf("cannot map physical allocated range "
243				"(num ranges = %" B_PRIu32 ")!\n",
244				gKernelArgs.num_physical_allocated_ranges);
245			return B_ERROR;
246		}
247
248		// insert range in virtual allocated
249
250		if (insert_virtual_allocated_range((addr_t)map->virtual_address,
251				map->length) != B_OK) {
252			dprintf("cannot map virtual allocated range "
253				"(num ranges = %" B_PRIu32 ")!\n",
254				gKernelArgs.num_virtual_allocated_ranges);
255		}
256
257		// insert range in virtual ranges to keep
258
259		if (keepRange) {
260			TRACE("keeping\n");
261
262			if (insert_virtual_range_to_keep(map->virtual_address,
263					map->length) != B_OK) {
264				dprintf("cannot map virtual range to keep "
265					"(num ranges = %" B_PRIu32 ")\n",
266					gKernelArgs.num_virtual_allocated_ranges);
267			}
268		} else {
269			TRACE("dropping\n");
270		}
271
272		total += map->length;
273	}
274	TRACE("total size kept: %" B_PRIu32 "\n", total);
275
276	// remove the boot loader code from the virtual ranges to keep in the
277	// kernel
278	if (remove_virtual_range_to_keep(&__text_begin, &_end - &__text_begin)
279			!= B_OK) {
280		dprintf("%s: Failed to remove boot loader range "
281			"from virtual ranges to keep.\n", __func__);
282	}
283
284	return B_OK;
285}
286
287
288static void *
289find_physical_memory_range(size_t size)
290{
291	for (uint32 i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
292		if (gKernelArgs.physical_memory_range[i].size > size)
293			return (void *)(addr_t)gKernelArgs.physical_memory_range[i].start;
294	}
295	return PHYSINVAL;
296}
297
298
299static void *
300find_free_physical_range(size_t size)
301{
302	// If nothing is allocated, just return the first address in RAM
303	if (gKernelArgs.num_physical_allocated_ranges == 0) {
304		if (gKernelArgs.num_physical_memory_ranges == 0)
305			return PHYSINVAL;
306
307		return find_physical_memory_range(size);
308	}
309
310	// Try to find space after an already allocated range
311	for (uint32 i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
312		void *address
313			= (void *)(addr_t)(gKernelArgs.physical_allocated_range[i].start
314				+ gKernelArgs.physical_allocated_range[i].size);
315		if (!is_physical_allocated(address, size)
316			&& is_physical_memory(address, size)) {
317			return address;
318		}
319	}
320
321	// Check if there is enough space at the start of one of the physical ranges
322	// (that memory isn't after an already allocated range so it wouldn't be
323	// found by the method above for ranges where there isn't already an initial
324	// allocation at the start)
325	for (uint32 i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
326		void *address = (void *)gKernelArgs.physical_memory_range[i].start;
327		if (gKernelArgs.physical_memory_range[i].size > size
328			&& !is_physical_allocated(address, size)) {
329			return address;
330		}
331	}
332
333	// We're really out of memory
334	return PHYSINVAL;
335}
336
337
/*!	Finds a free virtual range of \a size bytes. If \a base is non-NULL and
	itself free, it is returned. Otherwise the search scans the ends of all
	allocated ranges and prefers, in order: the lowest free address at or
	above \a base, then the first free address found at all. Returns NULL if
	nothing suitable exists (or if no ranges are allocated yet and base is
	NULL, since only range ends are probed).
*/
static void *
find_free_virtual_range(void *base, size_t size)
{
	if (base && !is_virtual_allocated(base, size))
		return base;

	void *firstFound = NULL;
	void *firstBaseFound = NULL;
	for (uint32 i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
		// Candidate: the address just past allocated range i.
		void *address
			= (void *)(addr_t)(gKernelArgs.virtual_allocated_range[i].start
				+ gKernelArgs.virtual_allocated_range[i].size);
		if (!is_virtual_allocated(address, size)) {
			if (!base)
				return address;

			if (firstFound == NULL)
				firstFound = address;
			if (address >= base
				&& (firstBaseFound == NULL || address < firstBaseFound)) {
				firstBaseFound = address;
			}
		}
	}
	return (firstBaseFound ? firstBaseFound : firstFound);
}
364
365
/*!	Allocates \a size bytes (rounded up to whole pages) of virtual address
	space backed by free physical memory, and maps the range with the
	protection derived from \a _protection.
	\param _virtualAddress preferred virtual address, or NULL for "anywhere"
	\param exactAddress if true, fail unless exactly \a _virtualAddress is free
	\return the virtual base address of the allocation, or NULL on failure
*/
extern "C" void *
arch_mmu_allocate(void *_virtualAddress, size_t size, uint8 _protection,
	bool exactAddress)
{
	// we only know page sizes
	size = ROUNDUP(size, B_PAGE_SIZE);

	// Only read-only vs. read-write is distinguished here.
	uint8 protection = 0;
	if (_protection & B_WRITE_AREA)
		protection = PAGE_READ_WRITE;
	else
		protection = PAGE_READ_ONLY;

	// If no address is given, use the KERNEL_BASE as base address, since
	// that avoids trouble in the kernel, when we decide to keep the region.
	void *virtualAddress = _virtualAddress;
#if 0
	if (!virtualAddress)
		virtualAddress = (void*)KERNEL_BASE;
#endif

	// find free address large enough to hold "size"
	virtualAddress = find_free_virtual_range(virtualAddress, size);
	if (virtualAddress == NULL)
		return NULL;

	// fail if the exact address was requested, but is not free
	if (exactAddress && _virtualAddress && virtualAddress != _virtualAddress) {
		dprintf("arch_mmu_allocate(): exact address requested, but virtual "
			"range (base: %p, size: %" B_PRIuSIZE ") is not free.\n",
			_virtualAddress, size);
		return NULL;
	}

#if 0
	intptr_t status;

	/* claim the address */
	status = of_call_method(sMmuInstance, "claim", 3, 1, 0, size,
		virtualAddress, &_virtualAddress);
	if (status != 0) {
		dprintf("arch_mmu_allocate(base: %p, size: %" B_PRIuSIZE ") "
			"failed to claim virtual address\n", virtualAddress, size);
		return NULL;
	}

#endif
	// we have a free virtual range for the allocation, now
	// have a look for free physical memory as well (we assume
	// that a) there is enough memory, and b) failing is fatal
	// so that we don't have to optimize for these cases :)

	void *physicalAddress = find_free_physical_range(size);
	if (physicalAddress == PHYSINVAL) {
		dprintf("arch_mmu_allocate(base: %p, size: %" B_PRIuSIZE ") "
			"no free physical address\n", virtualAddress, size);
		return NULL;
	}

	// everything went fine, so lets mark the space as used.

#if 0
	void* _physicalAddress;
	status = of_call_method(sMemoryInstance, "claim", 3, 1, physicalAddress,
		1, size, &_physicalAddress);

	if (status != 0) {
		dprintf("arch_mmu_allocate(base: %p, size: %" B_PRIuSIZE ") "
			"failed to claim physical address\n", physicalAddress, size);
		return NULL;
	}
#endif

	// NOTE(review): the ranges are recorded before map_range() can fail, so
	// a failed mapping leaves them marked allocated — acceptable in the boot
	// loader where mapping failure is effectively fatal.
	insert_virtual_allocated_range((addr_t)virtualAddress, size);
	insert_physical_allocated_range((addr_t)physicalAddress, size);

	if (!map_range(virtualAddress, physicalAddress, size, protection))
		return NULL;

	return virtualAddress;
}
447
448
449extern "C" status_t
450arch_mmu_free(void *address, size_t size)
451{
452	// TODO: implement freeing a region!
453	return B_OK;
454}
455
456
457//	#pragma mark - OpenFirmware callbacks and public API
458
459
460#if 0
/*!	OpenFirmware "map" callback: once we own memory management, OF asks us
	to establish its mappings. Records the range in the kernel args and
	installs the page mapping. (Currently disabled via #if 0.)
*/
static int
map_callback(struct of_arguments *args)
{
	void *physicalAddress = (void *)args->Argument(0);
	void *virtualAddress = (void *)args->Argument(1);
	int length = args->Argument(2);
	int mode = args->Argument(3);
	intptr_t &error = args->ReturnValue(0);

	// insert range in physical allocated if needed

	if (is_physical_memory(physicalAddress)
		&& insert_physical_allocated_range((addr_t)physicalAddress, length)
			!= B_OK) {
		error = -1;
		return OF_FAILED;
	}

	// insert range in virtual allocated

	if (insert_virtual_allocated_range((addr_t)virtualAddress, length)
			!= B_OK) {
		error = -2;
		return OF_FAILED;
	}

	// map range into the page table
	// (map_range()'s own failure is not reported back here)

	map_range(virtualAddress, physicalAddress, length, mode);

	return B_OK;
}
493
494
/*!	OpenFirmware "unmap" callback stub — always fails until implemented.
	(Currently disabled via #if 0.)
*/
static int
unmap_callback(struct of_arguments *args)
{
/*	void *address = (void *)args->Argument(0);
	int length = args->Argument(1);
	int &error = args->ReturnValue(0);
*/
	// TODO: to be implemented

	return OF_FAILED;
}
506
507
/*!	OpenFirmware "translate" callback stub — we keep no reverse translation
	table yet, so every lookup fails. (Currently disabled via #if 0.)
*/
static int
translate_callback(struct of_arguments *args)
{
	// could not find the translation
	return OF_FAILED;
}
514
515
/*!	OpenFirmware "alloc-real-mem" callback stub — always fails until
	implemented. (Currently disabled via #if 0.)
*/
static int
alloc_real_mem_callback(struct of_arguments *args)
{
/*	addr_t minAddress = (addr_t)args->Argument(0);
	addr_t maxAddress = (addr_t)args->Argument(1);
	int length = args->Argument(2);
	int mode = args->Argument(3);
	int &error = args->ReturnValue(0);
	int &physicalAddress = args->ReturnValue(1);
*/
	// ToDo: to be implemented

	return OF_FAILED;
}
530
531
/*!	Dispatches an OpenFirmware client callback to the handler matching its
	name; unknown services return OF_FAILED. (Currently disabled via #if 0.)
*/
static int
callback(struct of_arguments *args)
{
	const char *name = args->name;
	TRACE("OF CALLBACK: %s\n", name);

	if (!strcmp(name, "map"))
		return map_callback(args);
	else if (!strcmp(name, "unmap"))
		return unmap_callback(args);
	else if (!strcmp(name, "translate"))
		return translate_callback(args);
	else if (!strcmp(name, "alloc-real-mem"))
		return alloc_real_mem_callback(args);

	return OF_FAILED;
}
551#endif
552
553
/*!	Registers our memory-management callback with OpenFirmware so that OF
	requests memory from us instead of managing it itself.
	The registration is currently disabled (#if 0), so this is a no-op that
	always returns B_OK.
*/
extern "C" status_t
arch_set_callback(void)
{
#if 0
	// set OpenFirmware callbacks - it will ask us for memory after that
	// instead of maintaining it itself

	void *oldCallback = NULL;
	if (of_call_client_function("set-callback", 1, 1, &callback, &oldCallback)
			== OF_FAILED) {
		dprintf("Error: OpenFirmware set-callback failed\n");
		return B_ERROR;
	}
	TRACE("old callback = %p; new callback = %p\n", oldCallback, callback);
#endif

	return B_OK;
}
572
573
/*!	Boot-loader MMU setup: resolves the OpenFirmware mmu and memory
	instances, discovers physical RAM, and records the mappings OF has
	already established. Fills in the corresponding kernel_args fields.
	Returns B_OK on success, B_ERROR on any failure.
*/
extern "C" status_t
arch_mmu_init(void)
{
	// /chosen "mmu" and "memory" hold ihandles (stored as 32-bit ints).
	if (of_getprop(gChosen, "mmu", &sMmuInstance, sizeof(int)) == OF_FAILED) {
		dprintf("%s: Error: no OpenFirmware mmu\n", __func__);
		return B_ERROR;
	}

	if (of_getprop(gChosen, "memory", &sMemoryInstance, sizeof(int)) == OF_FAILED) {
		dprintf("%s: Error: no OpenFirmware memory\n", __func__);
		return B_ERROR;
	}
	// get map of physical memory (fill in kernel_args structure)

	size_t total;
	if (find_physical_memory_ranges(total) != B_OK) {
		dprintf("Error: could not find physical memory ranges!\n");
		return B_ERROR;
	}
	TRACE("total physical memory = %luMB\n", total / (1024 * 1024));

	// find_allocated_ranges() never sets this at present; the sentinel -1
	// simply means "no exception handler mapping found".
	void *exceptionHandlers = (void *)-1;
	if (find_allocated_ranges(&exceptionHandlers) != B_OK) {
		dprintf("Error: find_allocated_ranges() failed\n");
		return B_ERROR;
	}

#if 0
	if (exceptionHandlers == (void *)-1) {
		// TODO: create mapping for the exception handlers
		dprintf("Error: no mapping for the exception handlers!\n");
	}

	// Set the Open Firmware memory callback. From now on the Open Firmware
	// will ask us for memory.
	arch_set_callback();

	// set up new page table and turn on translation again
	// TODO "set up new page table and turn on translation again" (see PPC)
#endif

	// set kernel args

	TRACE("virt_allocated: %" B_PRIu32 "\n",
		gKernelArgs.num_virtual_allocated_ranges);
	TRACE("phys_allocated: %" B_PRIu32 "\n",
		gKernelArgs.num_physical_allocated_ranges);
	TRACE("phys_memory: %" B_PRIu32 "\n",
		gKernelArgs.num_physical_memory_ranges);

#if 0
	// TODO set gKernelArgs.arch_args content if we have something to put in there
	gKernelArgs.arch_args.page_table.start = (addr_t)sPageTable;
	gKernelArgs.arch_args.page_table.size = tableSize;

	gKernelArgs.arch_args.exception_handlers.start = (addr_t)exceptionHandlers;
	gKernelArgs.arch_args.exception_handlers.size = B_PAGE_SIZE;
#endif

	return B_OK;
}
635
636