/*
 * Copyright 2021-2022 Haiku, Inc. All rights reserved.
 * Released under the terms of the MIT License.
 */

#ifndef _ARM64_ARCH_MMU_H
#define _ARM64_ARCH_MMU_H


/*
 * Quotes taken from:
 * Arm® Architecture Reference Manual
 * Armv8, for Armv8-A architecture profile
 * Chapter: D5.3 VMSAv8-64 translation table format descriptors
 */
class ARMv8TranslationTableDescriptor {

	/* Descriptor bit[0] identifies whether the descriptor is valid,
	 * and is 1 for a valid descriptor. If a lookup returns an invalid
	 * descriptor, the associated input address is unmapped, and any
	 * attempt to access it generates a Translation fault.
	 *
	 * Descriptor bit[1] identifies the descriptor type, and is encoded as:
	 * 0, Block: the descriptor gives the base address of a block of memory,
	 * and the attributes for that memory region.
	 * 1, Table: the descriptor gives the address of the next level of
	 * translation table, and for a stage 1 translation, some attributes for
	 * that translation.
	 */

	static constexpr uint64_t kTypeMask = 0x3u;

	static constexpr uint64_t kTypeInvalid = 0x0u;
	static constexpr uint64_t kTypeBlock = 0x1u;
	static constexpr uint64_t kTypeTable = 0x3u;
	static constexpr uint64_t kTypePage = 0x3u;
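
	/* Note (added for clarity): kTypeTable and kTypePage share the same
	 * encoding. Bits[1:0] = 0b11 denote a table descriptor at levels 0-2
	 * and a page descriptor at the final level, while 0b01 is only valid
	 * as a block descriptor at levels that permit blocks.
	 */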

	// TODO: Place TABLE PAGE BLOCK prefixes accordingly
	struct UpperAttributes {
		static constexpr uint64_t TABLE_PXN	= (1UL << 59);	// PXNTable
		static constexpr uint64_t TABLE_XN	= (1UL << 60);	// XNTable
		static constexpr uint64_t TABLE_AP	= (1UL << 61);	// APTable[0]
		static constexpr uint64_t TABLE_NS	= (1UL << 63);	// NSTable
		static constexpr uint64_t BLOCK_PXN	= (1UL << 53);	// Privileged eXecute Never
		static constexpr uint64_t BLOCK_UXN	= (1UL << 54);	// Unprivileged eXecute Never
	};

	struct LowerAttributes {
		static constexpr uint64_t BLOCK_NS			= (1UL << 5);	// non-secure
		static constexpr uint64_t BLOCK_NON_SHARE	= (0UL << 8);	// SH[1:0]
		static constexpr uint64_t BLOCK_OUTER_SHARE	= (2UL << 8);
		static constexpr uint64_t BLOCK_INNER_SHARE	= (3UL << 8);
		static constexpr uint64_t BLOCK_AF			= (1UL << 10);	// Access Flag
		static constexpr uint64_t BLOCK_NG			= (1UL << 11);	// not global
	};

public:

	static constexpr uint64 DefaultPeripheralAttribute = LowerAttributes::BLOCK_AF
		| LowerAttributes::BLOCK_NON_SHARE
		| UpperAttributes::BLOCK_PXN
		| UpperAttributes::BLOCK_UXN;

	static constexpr uint64 DefaultCodeAttribute = LowerAttributes::BLOCK_AF
		| LowerAttributes::BLOCK_OUTER_SHARE;
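
	/* Illustrative note: these defaults cover only the flags above; a
	 * complete mapping also needs the AttrIndx field, e.g. ORing in
	 * MemoryAttributeIndirection().MaskOf(0x00) for Device-nGnRnE
	 * peripheral memory (0x00 being the corresponding MAIR encoding).
	 */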

	ARMv8TranslationTableDescriptor(uint64_t* descriptor)
		: fDescriptor(descriptor)
	{}

	// The raw value is interpreted as the address where the descriptor lives.
	ARMv8TranslationTableDescriptor(uint64_t descriptor)
		: fDescriptor(reinterpret_cast<uint64_t*>(descriptor))
	{}

	bool IsInvalid() {
		return (*fDescriptor & kTypeMask) == kTypeInvalid;
	}

	bool IsBlock() {
		return (*fDescriptor & kTypeMask) == kTypeBlock;
	}

	bool IsPage() {
		return (*fDescriptor & kTypeMask) == kTypePage;
	}

	bool IsTable() {
		return (*fDescriptor & kTypeMask) == kTypeTable;
	}


	uint64_t* Dereference() {
		if (IsTable())
			// TODO: Use ATTR_MASK
			return reinterpret_cast<uint64_t*>((*fDescriptor) & 0x0000fffffffff000ULL);
		else
			return NULL;
	}

	// Note: the attributes parameter is currently unused; table descriptors
	// are written with no table-level attributes set.
	void SetToTable(uint64* descriptor, uint64_t attributes) {
		*fDescriptor = reinterpret_cast<uint64_t>(descriptor) | kTypeTable;
	}

	void SetAsPage(uint64_t* physical, uint64_t attributes) {
		*fDescriptor = CleanAttributes(reinterpret_cast<uint64_t>(physical))
			| attributes | kTypePage;
	}

	void SetAsBlock(uint64_t* physical, uint64_t attributes) {
		*fDescriptor = CleanAttributes(reinterpret_cast<uint64_t>(physical))
			| attributes | kTypeBlock;
	}

	void Next() {
		fDescriptor++;
	}

	void JumpTo(uint16 slot) {
		fDescriptor += slot;
	}

	uint64 Value() {
		return *fDescriptor;
	}

	uint64 Location() {
		return reinterpret_cast<uint64_t>(fDescriptor);
	}

private:

	static uint64 CleanAttributes(uint64 address) {
		return address & ~ATTR_MASK;
	}

	uint64_t* fDescriptor;
};
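
/*
 * Usage sketch (illustrative only; assumes `table` points at a valid
 * level 2 table, `index` is the slot to fill and `physical` is a
 * suitably aligned block address):
 *
 *	ARMv8TranslationTableDescriptor descriptor(table);
 *	descriptor.JumpTo(index);
 *	if (descriptor.IsInvalid()) {
 *		descriptor.SetAsBlock(physical,
 *			ARMv8TranslationTableDescriptor::DefaultCodeAttribute);
 *	}
 */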


class MemoryAttributeIndirection {

public:
	MemoryAttributeIndirection(uint8 el = kInvalidExceptionLevel)
	{
		if (el == kInvalidExceptionLevel) {
			el = arch_exception_level();
		}

		switch (el) {
			case 1:
				fMair = READ_SPECIALREG(MAIR_EL1);
				break;
			case 2:
				fMair = READ_SPECIALREG(MAIR_EL2);
				break;
			case 3:
				fMair = READ_SPECIALREG(MAIR_EL3);
				break;
			default:
				fMair = 0x00u;
				break;
		}
	}

	// Returns the index of the 8-bit attribute encoding `requirement`
	// within MAIR, or 0xff when it is not present.
	uint8 IndexOf(uint8 requirement) {
		uint64 processedMair = fMair;
		uint8 index = 0;

		while (((processedMair & 0xFF) != requirement) && (index < 8)) {
			index++;
			processedMair = (processedMair >> 8);
		}

		return (index < 8) ? index : 0xff;
	}

	// Returns the AttrIndx field (descriptor bits [4:2]) for the given
	// encoding; only meaningful when IndexOf() succeeds.
	uint64 MaskOf(uint8 requirement) {
		return IndexOf(requirement) << 2;
	}

private:
	uint64 fMair;
};
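
/*
 * Example (illustrative): building the attributes for a Normal,
 * Write-Back cacheable mapping, assuming the firmware programmed the
 * 0xff encoding into one of the MAIR_ELx attribute slots:
 *
 *	MemoryAttributeIndirection mair;
 *	uint64 attributes = ARMv8TranslationTableDescriptor::DefaultCodeAttribute
 *		| mair.MaskOf(0xff);
 */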


class ARMv8TranslationRegime {

	static const uint8 skTranslationLevels = 4;
public:

	struct TranslationLevel {
		uint8 shift;	// log2 of the region covered by one entry
		uint64 mask;	// index mask applied after shifting
		bool blocks;	// block descriptors allowed at this level
		bool tables;	// table descriptors allowed at this level
		bool pages;		// page descriptors allowed at this level
	};

	typedef struct TranslationLevel TranslationDescriptor[skTranslationLevels];
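
	/* An illustrative regime (not defined in this header) for the 4KB
	 * granule with 48-bit addresses, where each table holds 512 entries:
	 *
	 *	ARMv8TranslationRegime::TranslationDescriptor regime48 = {
	 *		{ 39, 0x1ff, false, true, false },	// level 0: 512GB per entry
	 *		{ 30, 0x1ff, true, true, false },	// level 1: 1GB
	 *		{ 21, 0x1ff, true, true, false },	// level 2: 2MB
	 *		{ 12, 0x1ff, false, false, true }	// level 3: 4KB pages
	 *	};
	 */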

	ARMv8TranslationRegime(TranslationDescriptor& regime)
		: fRegime(regime)
	{}

	uint16 DescriptorIndex(addr_t virt_addr, uint8 level) {
		return (virt_addr >> fRegime[level].shift) & fRegime[level].mask;
	}

	bool BlocksAllowed(uint8 level) {
		return fRegime[level].blocks;
	}

	bool TablesAllowed(uint8 level) {
		return fRegime[level].tables;
	}

	bool PagesAllowed(uint8 level) {
		return fRegime[level].pages;
	}

	uint64 Mask(uint8 level) {
		return EntrySize(level) - 1;
	}

	bool Aligned(addr_t address, uint8 level) {
		return (address & Mask(level)) == 0;
	}

	uint64 EntrySize(uint8 level) {
		return 1ul << fRegime[level].shift;
	}

	uint64 TableSize(uint8 level) {
		return EntrySize(level) * arch_mmu_entries_per_granularity(Granularity());
	}

	uint64* AllocatePage(void) {
		uint64 size = Granularity();
		uint64* page = NULL;
#if 0
		// BUG: allocation here overlaps assigned memory ...
		if (platform_allocate_region((void **)&page, size, 0, false) == B_OK) {
#else
		// TODO: luckily size == B_PAGE_SIZE == 4KB ...
		page = reinterpret_cast<uint64*>(mmu_allocate_page());
		if (page != NULL) {
#endif
			memset(page, 0, size);
			if ((reinterpret_cast<uint64>(page) & (size - 1)) != 0) {
				panic("Memory requested not %lx aligned\n", size);
			}
			return page;
		} else {
			panic("Unavailable memory for descriptors\n");
			return NULL;
		}
	}

	uint8 MaxLevels() {
		return skTranslationLevels;
	}

	uint64 Granularity() {
		// Entry size of the last level, i.e. the granule (page) size
		return EntrySize(skTranslationLevels - 1);
	}

private:
	TranslationDescriptor& fRegime;
};
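
/*
 * Walk sketch (illustrative only; error handling omitted): descending
 * one level of the translation table for a virtual address `virt`,
 * given a regime and the current table:
 *
 *	ARMv8TranslationTableDescriptor descriptor(table);
 *	descriptor.JumpTo(regime.DescriptorIndex(virt, level));
 *	uint64* next = NULL;
 *	if (descriptor.IsTable()) {
 *		next = descriptor.Dereference();
 *	} else if (descriptor.IsInvalid() && regime.TablesAllowed(level)) {
 *		next = regime.AllocatePage();
 *		descriptor.SetToTable(next, 0);
 *	}
 */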

#endif /* _ARM64_ARCH_MMU_H */