/*
 * Copyright 2016-2022 Haiku, Inc. All rights reserved.
 * Copyright 2014, Jessica Hamilton, jessica.l.hamilton@gmail.com.
 * Copyright 2014, Henry Harrington, henry.harrington@gmail.com.
 * Distributed under the terms of the MIT License.
 */


#include <algorithm>

#include <boot/addr_range.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <kernel/kernel.h>

#include "efi_platform.h"
#include "mmu.h"


//#define TRACE_MMU
#ifdef TRACE_MMU
#   define TRACE(x...) dprintf("efi/mmu: " x)
#else
#   define TRACE(x...) ;
#endif


struct memory_region {
	memory_region *next;
	addr_t vaddr;
	phys_addr_t paddr;
	size_t size;
	void dprint(const char *msg) {
		dprintf("%s memory_region v: %#" B_PRIxADDR " p: %#" B_PRIxPHYSADDR
			" size: %" B_PRIuSIZE "\n", msg, vaddr, paddr, size);
	}

	bool matches(phys_addr_t expected_paddr, size_t expected_size) {
		return paddr == expected_paddr && size == expected_size;
	}
};


static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE + 32 * 1024 * 1024;
static memory_region *allocated_regions = NULL;


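/*!	Allocates a single physical page from EFI boot services and returns its
	physical address. Panics if the firmware cannot satisfy the request.
*/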
extern "C" phys_addr_t
mmu_allocate_page()
{
	TRACE("%s: called\n", __func__);

	efi_physical_addr addr;
	efi_status s = kBootServices->AllocatePages(AllocateAnyPages,
		EfiLoaderData, 1, &addr);

	if (s != EFI_SUCCESS)
		panic("Unable to allocate memory: %" B_PRIuSIZE, s);

	return addr;
}


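/*!	Returns the next free kernel virtual address and advances the cursor by
	\a size rounded up to a whole number of pages. This is a simple bump
	allocator: addresses are handed out sequentially and never reused.
*/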
extern "C" addr_t
get_next_virtual_address(size_t size)
{
	TRACE("%s: called. size: %" B_PRIuSIZE "\n", __func__, size);

	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += ROUNDUP(size, B_PAGE_SIZE);
	return address;
}


extern "C" addr_t
get_current_virtual_address()
{
	TRACE("%s: called\n", __func__);
	return sNextVirtualAddress;
}

// Platform allocator.
// The bootloader assumes that bootloader address space == kernel address
// space. This is not true until just before the kernel is booted, so an ugly
// hack is used to cover the difference: platform_allocate_region allocates
// addresses in bootloader space, but can convert them to kernel space. The
// ELF loader accesses kernel memory through these bootloader-space mappings,
// and much later in the boot process, addresses in the kernel argument
// struct are converted from bootloader addresses to kernel addresses.
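//
// A minimal sketch of the two-step flow (illustrative only, not code from
// this file):
//
//	void *buffer = NULL;
//	if (platform_allocate_region(&buffer, B_PAGE_SIZE, 0, false) == B_OK) {
//		// `buffer` now holds a bootloader-space (physical) address.
//		addr_t kernelAddress;
//		if (platform_bootloader_address_to_kernel_address(buffer,
//				&kernelAddress) == B_OK) {
//			// `kernelAddress` is where the same memory will appear once
//			// the kernel's address space is live.
//		}
//	}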

extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 /* protection */,
	bool exactAddress)
{
	TRACE("%s: called\n", __func__);

	efi_physical_addr addr;
	size_t pages = ROUNDUP(size, B_PAGE_SIZE) / B_PAGE_SIZE;
	efi_status status;

	if (exactAddress) {
		addr = (efi_physical_addr)(addr_t)*_address;
		status = kBootServices->AllocatePages(AllocateAddress,
			EfiLoaderData, pages, &addr);
	} else {
		addr = 0;
		status = kBootServices->AllocatePages(AllocateAnyPages,
			EfiLoaderData, pages, &addr);
	}

	if (status != EFI_SUCCESS)
		return B_NO_MEMORY;

	// Addresses above 512GB not supported.
	// Memory map regions above 512GB can be ignored, but if EFI returns pages
	// above that there's nothing that can be done to fix it.
	if (addr + size > (512ull * 1024 * 1024 * 1024))
		panic("Can't currently support more than 512GB of RAM!");

	memory_region *region = new(std::nothrow) memory_region {
		next: allocated_regions,
#ifdef __riscv
		// Disables allocation at fixed virtual address
		vaddr: 0,
#else
		vaddr: *_address == NULL ? 0 : (addr_t)*_address,
#endif
		paddr: (phys_addr_t)addr,
		size: size
	};

	if (region == NULL) {
		kBootServices->FreePages(addr, pages);
		return B_NO_MEMORY;
	}

#ifdef TRACE_MMU
	region->dprint("Allocated");
#endif
	allocated_regions = region;
	*_address = (void *)region->paddr;
	return B_OK;
}


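/*!	Allocates \a size bytes of physical memory below KERNEL_LOAD_BASE. Since
	vaddr is set equal to paddr, the region keeps the same address in both
	bootloader and kernel space.
*/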
extern "C" status_t
platform_allocate_lomem(void **_address, size_t size)
{
	TRACE("%s: called\n", __func__);

	efi_physical_addr addr = KERNEL_LOAD_BASE - B_PAGE_SIZE;
	size_t pages = ROUNDUP(size, B_PAGE_SIZE) / B_PAGE_SIZE;
	efi_status status = kBootServices->AllocatePages(AllocateMaxAddress,
		EfiLoaderData, pages, &addr);
	if (status != EFI_SUCCESS)
		return B_NO_MEMORY;

	memory_region *region = new(std::nothrow) memory_region {
		next: allocated_regions,
		vaddr: (addr_t)addr,
		paddr: (phys_addr_t)addr,
		size: size
	};

	if (region == NULL) {
		kBootServices->FreePages(addr, pages);
		return B_NO_MEMORY;
	}

	allocated_regions = region;
	*_address = (void *)region->paddr;
	return B_OK;
}


/*!	Neither \a physicalAddress nor \a size need to be aligned, but the
	function will map all pages the range intersects with.
	If \a physicalAddress is not page-aligned, the returned virtual address
	will have the same "misalignment".
*/
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	TRACE("%s: called\n", __func__);

	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;
	size += pageOffset;

	if (insert_physical_allocated_range(physicalAddress,
			ROUNDUP(size, B_PAGE_SIZE)) != B_OK)
		return B_NO_MEMORY;

	return physicalAddress + pageOffset;
}


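/*!	Moves the ranges in gKernelArgs.physical_allocated_range onto the
	allocated_regions list so they can be translated like any other
	allocation, then clears them so they aren't converted twice.
*/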
static void
convert_physical_ranges()
{
	TRACE("%s: called\n", __func__);

	addr_range *range = gKernelArgs.physical_allocated_range;
	uint32 num_ranges = gKernelArgs.num_physical_allocated_ranges;

	for (uint32 i = 0; i < num_ranges; ++i) {
		// Addresses above 512GB not supported.
		// Memory map regions above 512GB can be ignored, but if EFI returns
		// pages above that there's nothing that can be done to fix it.
		if (range[i].start + range[i].size > (512ull * 1024 * 1024 * 1024))
			panic("Can't currently support more than 512GB of RAM!");

		memory_region *region = new(std::nothrow) memory_region {
			next: allocated_regions,
			vaddr: 0,
			paddr: (phys_addr_t)range[i].start,
			size: (size_t)range[i].size
		};

		if (region == NULL)
			panic("Couldn't add allocated region");

		allocated_regions = region;

		// Clear out the allocated range
		range[i].start = 0;
		range[i].size = 0;
		gKernelArgs.num_physical_allocated_ranges--;
	}
}


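/*!	Translates a bootloader (physical) address into the kernel virtual
	address the same memory will have once the kernel is running. Virtual
	addresses are assigned lazily, the first time a region is looked up.
*/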
extern "C" status_t
platform_bootloader_address_to_kernel_address(void *address, addr_t *_result)
{
	TRACE("%s: called\n", __func__);

	// Convert any physical ranges prior to looking up the address
	convert_physical_ranges();

	// Double cast avoids sign extension issues on 32-bit architectures.
	phys_addr_t addr = (phys_addr_t)(addr_t)address;

	for (memory_region *region = allocated_regions; region;
			region = region->next) {
		if (region->paddr <= addr && addr < region->paddr + region->size) {
			// Lazily allocate virtual memory.
			if (region->vaddr == 0)
				region->vaddr = get_next_virtual_address(region->size);

			*_result = region->vaddr + (addr - region->paddr);
			//dprintf("Converted bootloader address %p in region %#lx-%#lx to %#lx\n",
			//	address, region->paddr, region->paddr + region->size, *_result);
			return B_OK;
		}
	}

	return B_ERROR;
}


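/*!	The reverse translation: maps a kernel virtual address back to its
	bootloader (physical) address. Only regions that have already been
	assigned a virtual address can match.
*/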
extern "C" status_t
platform_kernel_address_to_bootloader_address(addr_t address, void **_result)
{
	TRACE("%s: called\n", __func__);

	for (memory_region *region = allocated_regions; region;
			region = region->next) {
		if (region->vaddr != 0 && region->vaddr <= address
				&& address < region->vaddr + region->size) {
			*_result = (void *)(region->paddr + (address - region->vaddr));
			//dprintf("Converted kernel address %#lx in region %#lx-%#lx to %p\n",
			//	address, region->vaddr, region->vaddr + region->size, *_result);
			return B_OK;
		}
	}

	return B_ERROR;
}


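/*!	Frees a region previously allocated with platform_allocate_region().
	\a address and \a size must exactly match a prior allocation; an unknown
	region is a fatal error.
*/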
extern "C" status_t
platform_free_region(void *address, size_t size)
{
	TRACE("%s: called to release region %p (%" B_PRIuSIZE ")\n", __func__,
		address, size);

	for (memory_region **ref = &allocated_regions; *ref;
			ref = &(*ref)->next) {
		// Double cast avoids sign extension issues on 32-bit architectures.
		if ((*ref)->matches((phys_addr_t)(addr_t)address, size)) {
			efi_status status = kBootServices->FreePages(
				(efi_physical_addr)(addr_t)address,
				ROUNDUP(size, B_PAGE_SIZE) / B_PAGE_SIZE);
			ASSERT_ALWAYS(status == EFI_SUCCESS);
			memory_region* old = *ref;
			// The pointer that referenced this memory_region now points to
			// the next one
			*ref = (*ref)->next;
#ifdef TRACE_MMU
			old->dprint("Freeing");
#endif
			delete old;
			return B_OK;
		}
	}
	panic("platform_free_region: unknown region to free");
	return B_ERROR; // not reached
}


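/*!	Iterates over all allocated regions. Pass a pointer to a NULL cookie to
	start; returns false when the list is exhausted. Regions that don't yet
	have a virtual address are assigned one on the fly.
*/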
bool
mmu_next_region(void** cookie, addr_t* vaddr, phys_addr_t* paddr, size_t* size)
{
	if (*cookie == NULL)
		*cookie = allocated_regions;
	else
		*cookie = ((memory_region*)*cookie)->next;

	memory_region* region = (memory_region*)*cookie;
	if (region == NULL)
		return false;

	if (region->vaddr == 0)
		region->vaddr = get_next_virtual_address(region->size);

	*vaddr = region->vaddr;
	*paddr = region->paddr;
	*size = region->size;
	return true;
}
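
// A minimal usage sketch for mmu_next_region() (illustrative only, not code
// from this file):
//
//	void* cookie = NULL;
//	addr_t vaddr;
//	phys_addr_t paddr;
//	size_t size;
//	while (mmu_next_region(&cookie, &vaddr, &paddr, &size)) {
//		dprintf("region v: %#" B_PRIxADDR " p: %#" B_PRIxPHYSADDR
//			" size: %" B_PRIuSIZE "\n", vaddr, paddr, size);
//	}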