1/*
2 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
4 * Copyright 2019, Adrien Destugues, pulkomandy@pulkomandy.tk
5 * Distributed under the terms of the MIT License.
6 */
7
8#include <vm/vm.h>
9#include <vm/VMAddressSpace.h>
10#include <arch/vm.h>
11#include <boot/kernel_args.h>
12
13#include "RISCV64VMTranslationMap.h"
14
15
16#define TRACE_ARCH_VM
17#ifdef TRACE_ARCH_VM
18#	define TRACE(x) dprintf x
19#else
20#	define TRACE(x) ;
21#endif
22
23
// Sign extend a 39 bit (Sv39) virtual address to 64 bits: if bit 38 is
// set, all higher bits must be set as well to form a canonical address.
static uint64_t
SignExtendVirtAdr(uint64_t virtAdr)
{
	const uint64_t kSignBit = (uint64_t)1 << 38;
	if ((virtAdr & kSignBit) == 0)
		return virtAdr;
	return virtAdr | 0xFFFFFF8000000000;
}
31
32
33static Pte*
34LookupPte(phys_addr_t pageTable, addr_t virtAdr)
35{
36	Pte *pte = (Pte*)VirtFromPhys(pageTable);
37	for (int level = 2; level > 0; level --) {
38		pte += VirtAdrPte(virtAdr, level);
39		if (!pte->isValid) {
40			return NULL;
41		}
42		// TODO: Handle superpages (RWX=0 when not at lowest level)
43		pte = (Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn);
44	}
45	pte += VirtAdrPte(virtAdr, 0);
46	return pte;
47}
48
49
50
51static void
52WritePteFlags(uint32 flags)
53{
54	bool first = true;
55	dprintf("{");
56	for (uint32 i = 0; i < 32; i++) {
57		if ((1 << i) & flags) {
58			if (first)
59				first = false;
60			else
61				dprintf(", ");
62
63			switch (i) {
64				case 0:
65					dprintf("valid");
66					break;
67				case 1:
68					dprintf("read");
69					break;
70				case 2:
71					dprintf("write");
72					break;
73				case 3:
74					dprintf("exec");
75					break;
76				case 4:
77					dprintf("user");
78					break;
79				case 5:
80					dprintf("global");
81					break;
82				case 6:
83					dprintf("accessed");
84					break;
85				case 7:
86					dprintf("dirty");
87					break;
88				default:
89					dprintf("%" B_PRIu32, i);
90			}
91		}
92	}
93	dprintf("}");
94}
95
96
// Coalesces individual page mappings into contiguous ranges for output:
// consecutive Write() calls whose virtual AND physical addresses both
// directly continue the previous run, with identical flags, are merged
// and printed as a single line.
class PageTableDumper
{
private:
	uint64 firstVirt;	// virtual start of the current run
	uint64 firstPhys;	// physical start of the current run
	uint64 firstFlags;	// PTE flag bits shared by the whole run
	uint64 len;			// length of the run in bytes; 0 means "no run yet"

public:
	PageTableDumper()
		:
		firstVirt(0),
		firstPhys(0),
		firstFlags(0),
		len(0)
	{}

	~PageTableDumper()
	{
		// Flush: a (0, 0, 0, 0) entry cannot extend a real run, so the
		// pending range (if any) gets printed.
		Write(0, 0, 0, 0);
	}

	// Record one mapping of `size` bytes at virtAdr -> physAdr with the
	// given flags. Extends the current run when contiguous and the flags
	// match; otherwise prints the finished run and starts a new one.
	void Write(uint64_t virtAdr, uint64_t physAdr, size_t size, uint64 flags) {
		if (virtAdr == firstVirt + len && physAdr == firstPhys + len && flags == firstFlags) {
			len += size;
		} else {
			if (len != 0) {
				// "<virt start> - <virt end>: <phys start> - <phys end>, <len>, {flags}"
				dprintf("  0x%08" B_PRIxADDR " - 0x%08" B_PRIxADDR,
				firstVirt, firstVirt + (len - 1));
				dprintf(": 0x%08" B_PRIxADDR " - 0x%08" B_PRIxADDR ", %#" B_PRIxADDR ", ",
					firstPhys, firstPhys + (len - 1), len);
				WritePteFlags(firstFlags); dprintf("\n");
			}
			firstVirt = virtAdr;
			firstPhys = physAdr;
			firstFlags = flags;
			len = size;
		}
	}
};
137
138
139static void
140DumpPageTableInt(Pte* pte, uint64_t virtAdr, uint32_t level, PageTableDumper& dumper)
141{
142	for (uint32 i = 0; i < pteCount; i++) {
143		if (pte[i].isValid) {
144			if (!pte[i].isRead && !pte[i].isWrite && !pte[i].isExec) {
145
146				if (level == 0)
147					kprintf("  internal page table on level 0\n");
148
149				DumpPageTableInt((Pte*)VirtFromPhys(B_PAGE_SIZE*pte[i].ppn),
150					virtAdr + ((uint64_t)i << (pageBits + pteIdxBits * level)),
151					level - 1, dumper);
152			} else {
153				dumper.Write(SignExtendVirtAdr(virtAdr
154						+ ((uint64_t)i << (pageBits + pteIdxBits*level))),
155					pte[i].ppn * B_PAGE_SIZE, 1 << (pageBits + pteIdxBits * level),
156					pte[i].val & 0xff);
157			}
158		}
159	}
160}
161
162
163static int
164DumpPageTable(int argc, char** argv)
165{
166	int curArg = 1;
167	SatpReg satp;
168	bool isArea = false;
169	addr_t base = 0;
170	size_t size = 0;
171
172	satp.val = Satp();
173	while (curArg < argc && argv[curArg][0] == '-') {
174		if (strcmp(argv[curArg], "-team") == 0) {
175			curArg++;
176			team_id id = strtoul(argv[curArg++], NULL, 0);
177			VMAddressSpace* addrSpace = VMAddressSpace::DebugGet(id);
178			if (addrSpace == NULL) {
179				kprintf("could not find team %" B_PRId32 "\n", id);
180				return 0;
181			}
182			satp.val = ((RISCV64VMTranslationMap*)
183				addrSpace->TranslationMap())->Satp();
184			isArea = false;
185		} else if (strcmp(argv[curArg], "-area") == 0) {
186			curArg++;
187			uint64 areaId;
188			if (!evaluate_debug_expression(argv[curArg++], &areaId, false))
189				return 0;
190			VMArea* area = VMAreas::Lookup((area_id)areaId);
191			if (area == NULL) {
192				kprintf("could not find area %" B_PRId32 "\n", (area_id)areaId);
193				return 0;
194			}
195			satp.val = ((RISCV64VMTranslationMap*)
196				area->address_space->TranslationMap())->Satp();
197			base = area->Base();
198			size = area->Size();
199			kprintf("area %" B_PRId32 "(%s)\n", area->id, area->name);
200				isArea = true;
201		} else {
202			kprintf("unknown flag \"%s\"\n", argv[curArg]);
203			return 0;
204		}
205	}
206
207	kprintf("satp: %#" B_PRIx64 "\n", satp.val);
208
209	PageTableDumper dumper;
210
211	if (!isArea) {
212		Pte* root = (Pte*)VirtFromPhys(satp.ppn * B_PAGE_SIZE);
213		DumpPageTableInt(root, 0, 2, dumper);
214	} else {
215		for (; size > 0; base += B_PAGE_SIZE, size -= B_PAGE_SIZE) {
216			Pte* pte = LookupPte(satp.ppn * B_PAGE_SIZE, base);
217			if (pte == NULL || !pte->isValid)
218				continue;
219
220			dumper.Write(base, pte->ppn * B_PAGE_SIZE, B_PAGE_SIZE, pte->val & 0xff);
221		}
222	}
223
224	return 0;
225}
226
227
228static int
229DumpVirtPage(int argc, char** argv)
230{
231	int curArg = 1;
232	SatpReg satp;
233
234	satp.val = Satp();
235	while (curArg < argc && argv[curArg][0] == '-') {
236		if (strcmp(argv[curArg], "-team") == 0) {
237			curArg++;
238			team_id id = strtoul(argv[curArg++], NULL, 0);
239			VMAddressSpace* addrSpace = VMAddressSpace::DebugGet(id);
240			if (addrSpace == NULL) {
241				kprintf("could not find team %" B_PRId32 "\n", id);
242				return 0;
243			}
244			satp.val = ((RISCV64VMTranslationMap*)
245				addrSpace->TranslationMap())->Satp();
246		} else {
247			kprintf("unknown flag \"%s\"\n", argv[curArg]);
248			return 0;
249		}
250	}
251
252	kprintf("satp: %#" B_PRIx64 "\n", satp.val);
253
254	uint64 virt = 0;
255	if (!evaluate_debug_expression(argv[curArg++], &virt, false))
256		return 0;
257
258	virt = ROUNDDOWN(virt, B_PAGE_SIZE);
259
260	Pte* pte = LookupPte(satp.ppn * B_PAGE_SIZE, virt);
261	if (pte == NULL) {
262		dprintf("not mapped\n");
263		return 0;
264	}
265
266	PageTableDumper dumper;
267	dumper.Write(virt, pte->ppn * B_PAGE_SIZE, B_PAGE_SIZE, pte->val & 0xff);
268
269	return 0;
270}
271
272
status_t
arch_vm_init(kernel_args *args)
{
	// Nothing to initialize at this stage on RISC-V.
	return B_OK;
}
278
279
status_t
arch_vm_init_post_area(kernel_args *args)
{
	// Reserve the physical memory map region (set up by the boot loader)
	// with a null area so nothing else gets placed there.
	void* address = (void*)args->arch_args.physMap.start;
	area_id area = vm_create_null_area(VMAddressSpace::KernelID(),
		"physical map area", &address, B_EXACT_ADDRESS,
		args->arch_args.physMap.size, 0);
	if (area < B_OK)
		return area;

	// Page table inspection commands for the kernel debugger.
	add_debugger_command("dump_page_table", &DumpPageTable, "Dump page table");
	add_debugger_command("dump_virt_page", &DumpVirtPage, "Dump virtual page mapping");

	return B_OK;
}
295
296
status_t
arch_vm_init_post_modules(kernel_args *args)
{
	// Nothing to do after module initialization on RISC-V.
	return B_OK;
}
302
303
status_t
arch_vm_init_end(kernel_args *args)
{
	TRACE(("arch_vm_init_end(): %" B_PRIu32 " virtual ranges to keep:\n",
		args->arch_args.num_virtual_ranges_to_keep));

	// Turn the boot loader mappings that must survive into proper areas.
	for (int i = 0; i < (int)args->arch_args.num_virtual_ranges_to_keep; i++) {
		addr_range &range = args->arch_args.virtual_ranges_to_keep[i];

		TRACE(("  start: %p, size: %#" B_PRIxSIZE "\n", (void*)range.start, range.size));

#if 1
		// skip ranges outside the kernel address space
		if (!IS_KERNEL_ADDRESS(range.start)) {
			TRACE(("    no kernel address, skipping...\n"));
			continue;
		}

		// Look up the physical page backing the range's first page and
		// wrap the whole range into an area at the same virtual address.
		// NOTE(review): only the first page's mapping is looked up, so
		// this assumes the range is physically contiguous — confirm.
		phys_addr_t physicalAddress;
		void *address = (void*)range.start;
		if (vm_get_page_mapping(VMAddressSpace::KernelID(), range.start,
				&physicalAddress) != B_OK)
			panic("arch_vm_init_end(): No page mapping for %p\n", address);
		area_id area = vm_map_physical_memory(VMAddressSpace::KernelID(),
			"boot loader reserved area", &address,
			B_EXACT_ADDRESS, range.size,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			physicalAddress, true);
		if (area < 0) {
			panic("arch_vm_init_end(): Failed to create area for boot loader "
				"reserved area: %p - %p\n", (void*)range.start,
				(void*)(range.start + range.size));
		}
#endif
	}

#if 0
	// Throw away any address space mappings we've inherited from the boot
	// loader and have not yet turned into an area.
	vm_free_unused_boot_loader_range(0, 0xffffffff - B_PAGE_SIZE + 1);
#endif

	return B_OK;
}
348
349
void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
	// This function is only invoked when a userland thread is in the process
	// of dying. It switches to the kernel team and does whatever cleanup is
	// necessary (in case it is the team's main thread, it will delete the
	// team).
	// It is however not necessary to change the page directory. Userland team's
	// page directories include all kernel mappings as well. Furthermore our
	// arch specific translation map data objects are ref-counted, so they won't
	// go away as long as they are still used on any CPU.

	// Activate the target address space's page table and flush the TLB
	// for ASID 0.
	SetSatp(((RISCV64VMTranslationMap*)to->TranslationMap())->Satp());
	FlushTlbAllAsid(0);
}
365
366
bool
arch_vm_supports_protection(uint32 protection)
{
	// Every protection combination is accepted on RISC-V.
	return true;
}
372
373
void
arch_vm_unset_memory_type(VMArea *area)
{
	// Memory types are not implemented for RISC-V; nothing to undo.
}
378
379
status_t
arch_vm_set_memory_type(VMArea *area, phys_addr_t physicalBase, uint32 type)
{
	// Memory types are not implemented for RISC-V; accept any type.
	return B_OK;
}
385