/*
 * Copyright 2019 Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


#include <new>

#include <arch/vm_translation_map.h>
#include <boot/kernel_args.h>
#include <vm/VMAddressSpace.h>
#include <vm/vm.h>

#include "PMAPPhysicalPageMapper.h"
#include "VMSAv8TranslationMap.h"

// Statically reserved storage for the kernel's physical page mapper, which is
// constructed in place before the kernel heap is available.
static char sPhysicalPageMapperData[sizeof(PMAPPhysicalPageMapper)];


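// Create the VMTranslationMap object for a new address space. Kernel maps
// reuse the translation table that the boot loader installed in TTBR1_EL1;
// creating user address space maps is not implemented yet.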
status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	phys_addr_t pt = 0;
	if (kernel) {
		pt = READ_SPECIALREG(TTBR1_EL1);
	} else {
		panic("arch_vm_translation_map_create_map user not implemented");
	}

	// 4 KiB translation granule (12-bit page shift), 48-bit virtual addresses
	*_map = new (std::nothrow) VMSAv8TranslationMap(kernel, pt, 12, 48, 1);

	if (*_map == NULL)
		return B_NO_MEMORY;

	return B_OK;
}


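// Early translation map setup: read back the configuration that the boot
// loader programmed into TCR_EL1 and the translation table base registers,
// enable hardware updates of the access/dirty flags when the CPU supports
// them, and hand back the physical page mapper.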
status_t
arch_vm_translation_map_init(kernel_args* args, VMPhysicalPageMapper** _physicalPageMapper)
{
	dprintf("arch_vm_translation_map_init\n");

	// Nuke the TTBR0 mapping; the kernel uses the identity mapping at
	// KERNEL_PMAP_BASE instead.
	memset((void*) READ_SPECIALREG(TTBR0_EL1), 0, B_PAGE_SIZE);

	uint64_t tcr = READ_SPECIALREG(TCR_EL1);
	// T0SZ and T1SZ are 6-bit fields at bits [5:0] and [21:16] of TCR_EL1.
	uint32_t t0sz = tcr & 0x3f;
	uint32_t t1sz = (tcr >> 16) & 0x3f;
	uint32_t tg0 = (tcr >> 14) & 0x3;
	uint32_t tg1 = (tcr >> 30) & 0x3;
	uint64_t ttbr0 = READ_SPECIALREG(TTBR0_EL1);
	uint64_t ttbr1 = READ_SPECIALREG(TTBR1_EL1);
	uint64_t mair = READ_SPECIALREG(MAIR_EL1);
	uint64_t mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
	uint64_t sctlr = READ_SPECIALREG(SCTLR_EL1);

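	// Enable hardware management of the access flag (TCR_EL1.HA), and of the
	// dirty state (TCR_EL1.HD) as well when the CPU supports it, as reported
	// by ID_AA64MMFR1_EL1.HAFDBS.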
	uint64_t hafdbs = ID_AA64MMFR1_HAFDBS(mmfr1);
	if (hafdbs == ID_AA64MMFR1_HAFDBS_AF) {
		VMSAv8TranslationMap::fHwFeature = VMSAv8TranslationMap::HW_ACCESS;
		tcr |= (1UL << 39);	// TCR_EL1.HA
	}
	if (hafdbs == ID_AA64MMFR1_HAFDBS_AF_DBS) {
		VMSAv8TranslationMap::fHwFeature
			= VMSAv8TranslationMap::HW_ACCESS | VMSAv8TranslationMap::HW_DIRTY;
		tcr |= (1UL << 40) | (1UL << 39);	// TCR_EL1.HD | TCR_EL1.HA
	}

	VMSAv8TranslationMap::fMair = mair;

	WRITE_SPECIALREG(TCR_EL1, tcr);

	dprintf("vm config: MMFR1: %lx, TCR: %lx\nTTBR0: %lx, TTBR1: %lx\nT0SZ: %u, T1SZ: %u, TG0: %u, "
			"TG1: %u, MAIR: %lx, SCTLR: %lx\n",
		mmfr1, tcr, ttbr0, ttbr1, t0sz, t1sz, tg0, tg1, mair, sctlr);

	// Construct the page mapper in the statically reserved buffer.
	*_physicalPageMapper = new (&sPhysicalPageMapperData) PMAPPhysicalPageMapper();

	return B_OK;
}


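// Nothing additional needs to be set up once semaphores become available.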
status_t
arch_vm_translation_map_init_post_sem(kernel_args* args)
{
	dprintf("arch_vm_translation_map_init_post_sem\n");
	return B_OK;
}


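// Once the VM can create areas, reserve kernel areas for the ranges that the
// boot loader already mapped: the physical map window and the debug UART.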
status_t
arch_vm_translation_map_init_post_area(kernel_args* args)
{
	dprintf("arch_vm_translation_map_init_post_area\n");

	// Create an area covering the physical map area.
	void* address = (void*) KERNEL_PMAP_BASE;
	area_id area = vm_create_null_area(VMAddressSpace::KernelID(), "physical map area", &address,
		B_EXACT_ADDRESS, KERNEL_PMAP_SIZE, 0);

	if (args->arch_args.uart.kind[0] != 0) {
		// debug uart is already mapped by the efi loader
		address = (void*)args->arch_args.uart.regs.start;
		area_id area = vm_create_null_area(VMAddressSpace::KernelID(),
			"debug uart map area", &address, B_EXACT_ADDRESS,
			ROUNDUP(args->arch_args.uart.regs.size, B_PAGE_SIZE), 0);
	}

	return B_OK;
}

// TODO: reuse some bits from VMSAv8TranslationMap

// Descriptor layout used by the early-map path: the output address occupies
// bits [47:12] of a descriptor; everything besides the address and the type
// bits is attribute bits.
static constexpr uint64_t kPteAddrMask = (((1UL << 36) - 1) << 12);
static constexpr uint64_t kPteAttrMask = ~(kPteAddrMask | 0x3);

// The early-map path assumes a 4 KiB granule and a 48-bit kernel address
// space (T1SZ = 16).
static uint64_t page_bits = 12;
static uint64_t tsz = 16;


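// Access a page table at the given physical address through the physical map
// window at KERNEL_PMAP_BASE.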
static uint64_t*
TableFromPa(phys_addr_t pa)
{
	return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
}


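// Recursively walk the translation table at ptPa and install a level 3 entry
// mapping va to pa, allocating intermediate tables from get_free_page() and
// splitting existing block mappings into tables as needed.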
static void
map_page_early(phys_addr_t ptPa, int level, addr_t va, phys_addr_t pa,
	phys_addr_t (*get_free_page)(kernel_args*), kernel_args* args)
{
	int tableBits = page_bits - 3;
	uint64_t tableMask = (1UL << tableBits) - 1;

	int shift = tableBits * (3 - level) + page_bits;

	int index = (va >> shift) & tableMask;
	uint64_t* pte = &TableFromPa(ptPa)[index];

	if (level == 3) {
		// Leaf level: install the page descriptor.
		atomic_set64((int64*) pte, pa | 0x3);
		asm("dsb ish");
	} else {
		uint64_t pteVal = atomic_get64((int64*) pte);
		int type = pteVal & 0x3;

		phys_addr_t table;
		if (type == 0x3) {
			// A table descriptor is already present, descend into it.
			table = pteVal & kPteAddrMask;
		} else {
			// Allocate a new next-level table.
			table = get_free_page(args) << page_bits;
			dprintf("early: pulling page %lx\n", table);
			uint64_t* newTableVa = TableFromPa(table);

			if (type == 0x1) {
				// Split the existing block mapping into next-level entries.
				int shift = tableBits * (3 - (level + 1)) + page_bits;
				int entrySize = 1UL << shift;

				for (int i = 0; i < (1 << tableBits); i++)
					newTableVa[i] = pteVal + i * entrySize;
			} else {
				// Nothing was mapped here, start from an empty table.
				memset(newTableVa, 0, 1 << page_bits);
			}

			asm("dsb ish");

			atomic_set64((int64*) pte, table | 0x3);
		}

		map_page_early(table, level + 1, va, pa, get_free_page, args);
	}
}


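// Map a single page into the kernel address space before the VM is fully set
// up, taking physical pages for new tables from get_free_page().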
status_t
arch_vm_translation_map_early_map(kernel_args* args, addr_t va, phys_addr_t pa, uint8 attributes,
	phys_addr_t (*get_free_page)(kernel_args*))
{
	int va_bits = 64 - tsz;
	uint64_t va_mask = (1UL << va_bits) - 1;
	// Only kernel (TTBR1) addresses are expected here.
	ASSERT((va & ~va_mask) == ~va_mask);

	phys_addr_t ptPa = READ_SPECIALREG(TTBR1_EL1);
	int level = VMSAv8TranslationMap::CalcStartLevel(va_bits, page_bits);
	va &= va_mask;
	// Merge the memory attribute and protection bits into the descriptor.
	pa |= VMSAv8TranslationMap::GetMemoryAttr(attributes, 0, true);

	map_page_early(ptPa, level, va, pa, get_free_page, args);

	return B_OK;
}


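// Check whether a kernel page is accessible with the requested protection by
// asking the MMU to translate the address (AT S1E1W/S1E1R) and inspecting the
// fault bit in PAR_EL1.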
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t va, uint32 protection)
{
	if ((protection & B_KERNEL_WRITE_AREA) != 0)
		asm("at s1e1w, %0" : : "r"((uint64_t) va));
	else
		asm("at s1e1r, %0" : : "r"((uint64_t) va));

	// The translation result only becomes visible in PAR_EL1 after a context
	// synchronization event.
	asm("isb");

	return (READ_SPECIALREG(PAR_EL1) & PAR_F) == 0;
}