/*
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "generic_vm_physical_page_mapper.h"

#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <thread.h>
#include <util/queue.h>

#include <string.h>
#include <stdlib.h>

//#define TRACE_VM_PHYSICAL_PAGE_MAPPER
#ifdef TRACE_VM_PHYSICAL_PAGE_MAPPER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define DEBUG_IO_SPACE
// data and structures used to represent physical pages mapped into iospace
typedef struct paddr_chunk_descriptor {
	struct paddr_chunk_descriptor *next_q;
		// must remain first in structure, queue code uses it
	int32	ref_count;
	addr_t	va;
#ifdef DEBUG_IO_SPACE
	thread_id last_ref;
#endif
} paddr_chunk_desc;

static paddr_chunk_desc *paddr_desc;	// will be one per physical chunk
static paddr_chunk_desc **virtual_pmappings;
	// will be one pointer per virtual chunk in iospace
static int first_free_vmapping;
static int num_virtual_chunks;
static queue mapped_paddr_lru;
static mutex sMutex = MUTEX_INITIALIZER("iospace_mutex");
static sem_id sChunkAvailableSem;
static int32 sChunkAvailableWaitingCounter;

static generic_map_iospace_chunk_func sMapIOSpaceChunk;
static addr_t sIOSpaceBase;
static size_t sIOSpaceSize;
static size_t sIOSpaceChunkSize;


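/*!	Maps the physical page containing \a pa into the I/O space and returns
	the corresponding virtual address in \a va. An already mapped chunk is
	reused (its reference count is incremented); otherwise a free virtual
	chunk slot is claimed, or the least recently used unreferenced chunk is
	evicted and remapped. If no chunk is available, the call fails with
	B_NO_MEMORY when PHYSICAL_PAGE_DONT_WAIT is set in \a flags, and blocks
	until a chunk is put back otherwise. Every successful call must be
	balanced by generic_put_physical_page().

	Typical usage sketch (caller, physicalAddress, and the page access are
	hypothetical):

		addr_t virtualAddress;
		if (generic_get_physical_page(physicalAddress, &virtualAddress, 0)
				== B_OK) {
			// ... access the page through virtualAddress ...
			generic_put_physical_page(virtualAddress);
		}
*/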
status_t
generic_get_physical_page(phys_addr_t pa, addr_t *va, uint32 flags)
{
	int index;
	paddr_chunk_desc *replaced_pchunk;

restart:
	mutex_lock(&sMutex);

	// see if the page is already mapped
	index = pa / sIOSpaceChunkSize;
	if (paddr_desc[index].va != 0) {
		if (paddr_desc[index].ref_count++ == 0) {
			// pull this descriptor out of the lru list
			queue_remove_item(&mapped_paddr_lru, &paddr_desc[index]);
		}
		*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
		mutex_unlock(&sMutex);
		return B_OK;
	}

	// map it
	if (first_free_vmapping < num_virtual_chunks) {
		// there's a free hole
		paddr_desc[index].va = first_free_vmapping * sIOSpaceChunkSize
			+ sIOSpaceBase;
		*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
		virtual_pmappings[first_free_vmapping] = &paddr_desc[index];
		paddr_desc[index].ref_count++;

		// push up the first_free_vmapping pointer
		for (; first_free_vmapping < num_virtual_chunks;
			 first_free_vmapping++) {
			if (virtual_pmappings[first_free_vmapping] == NULL)
				break;
		}

		sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize,
			flags);
		mutex_unlock(&sMutex);

		return B_OK;
	}

	// replace an earlier mapping
	if (queue_peek(&mapped_paddr_lru) == NULL) {
		// no free slots available
		if ((flags & PHYSICAL_PAGE_DONT_WAIT) != 0) {
			// fail and let the caller handle it
			mutex_unlock(&sMutex);
			return B_NO_MEMORY;
		} else {
			sChunkAvailableWaitingCounter++;

			mutex_unlock(&sMutex);
			acquire_sem(sChunkAvailableSem);
			goto restart;
		}
	}

	replaced_pchunk = (paddr_chunk_desc*)queue_dequeue(&mapped_paddr_lru);
	paddr_desc[index].va = replaced_pchunk->va;
	replaced_pchunk->va = 0;
	*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
	paddr_desc[index].ref_count++;
#ifdef DEBUG_IO_SPACE
	paddr_desc[index].last_ref = thread_get_current_thread_id();
#endif
	virtual_pmappings[(*va - sIOSpaceBase) / sIOSpaceChunkSize]
		= paddr_desc + index;

	sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize, flags);

	mutex_unlock(&sMutex);
	return B_OK;
}


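/*!	Releases a mapping obtained with generic_get_physical_page(). When the
	chunk's reference count drops to zero, it is appended to the LRU queue
	so it can be recycled, and one waiting thread (if any) is woken up.
*/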
status_t
generic_put_physical_page(addr_t va)
{
	paddr_chunk_desc *desc;

	if (va < sIOSpaceBase || va >= sIOSpaceBase + sIOSpaceSize)
		panic("someone called put_physical_page on an invalid va 0x%lx\n", va);
	va -= sIOSpaceBase;

	mutex_lock(&sMutex);

	desc = virtual_pmappings[va / sIOSpaceChunkSize];
	if (desc == NULL) {
		mutex_unlock(&sMutex);
		panic("put_physical_page called on page at va 0x%lx which is not "
			"checked out\n", va + sIOSpaceBase);
		return B_ERROR;
	}

	if (--desc->ref_count == 0) {
		// put it on the mapped lru list
		queue_enqueue(&mapped_paddr_lru, desc);

		if (sChunkAvailableWaitingCounter > 0) {
			sChunkAvailableWaitingCounter--;
			release_sem_etc(sChunkAvailableSem, 1, B_DO_NOT_RESCHEDULE);
		}
	}

	mutex_unlock(&sMutex);

	return B_OK;
}


#ifdef DEBUG_IO_SPACE
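// Debugger command dumping the I/O space state: physical chunk descriptors
// ('p'), the virtual chunk mapping table ('v'), and/or the LRU queue of
// mapped but currently unreferenced chunks ('q').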
static int
dump_iospace(int argc, char** argv)
{
	if (argc < 2) {
		kprintf("usage: iospace <physical|virtual|queue>\n");
		return 0;
	}

	int32 i;

	if (strchr(argv[1], 'p')) {
		// physical address descriptors
		kprintf("I/O space physical descriptors (%p)\n", paddr_desc);

		int32 max = vm_page_num_pages() / (sIOSpaceChunkSize / B_PAGE_SIZE);
		if (argc == 3)
			max = strtol(argv[2], NULL, 0);

		for (i = 0; i < max; i++) {
			kprintf("[%03lx %p %3ld %3ld] ", i, (void *)paddr_desc[i].va,
				paddr_desc[i].ref_count, paddr_desc[i].last_ref);
			if (i % 4 == 3)
				kprintf("\n");
		}
		if (i % 4)
			kprintf("\n");
	}

	if (strchr(argv[1], 'v')) {
		// virtual mappings
		kprintf("I/O space virtual chunk mappings (%p, first free: %d)\n",
			virtual_pmappings, first_free_vmapping);

		for (i = 0; i < num_virtual_chunks; i++) {
			if (virtual_pmappings[i] != NULL) {
				kprintf("[%2ld. %03lx] ", i,
					virtual_pmappings[i] - paddr_desc);
			} else
				kprintf("[%2ld.  - ] ", i);
			if (i % 8 == 7)
				kprintf("\n");
		}
		if (i % 8)
			kprintf("\n");
	}

	if (strchr(argv[1], 'q')) {
		// LRU queue of mapped but currently unreferenced chunks
		kprintf("I/O space mapped queue:\n");

		paddr_chunk_descriptor* descriptor
			= (paddr_chunk_descriptor *)queue_peek(&mapped_paddr_lru);
		i = 0;

		while (descriptor != NULL) {
			kprintf("[%03lx %p] ", descriptor - paddr_desc, descriptor);
			if (i++ % 8 == 7)
				kprintf("\n");

			descriptor = descriptor->next_q;
		}
		if (i % 8)
			kprintf("\n");
	}

	return 0;
}
#endif


//	#pragma mark -


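/*!	Early (pre-VM) initialization: reserves the virtual address range for
	the I/O space and allocates the descriptor arrays from early boot
	memory. \a mapIOSpaceChunk is the architecture-specific function used
	later to (re)map a chunk of the I/O space onto physical memory.
*/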
status_t
generic_vm_physical_page_mapper_init(kernel_args *args,
	generic_map_iospace_chunk_func mapIOSpaceChunk, addr_t *ioSpaceBase,
	size_t ioSpaceSize, size_t ioSpaceChunkSize)
{
	TRACE(("generic_vm_physical_page_mapper_init: entry\n"));

	sMapIOSpaceChunk = mapIOSpaceChunk;
	sIOSpaceSize = ioSpaceSize;
	sIOSpaceChunkSize = ioSpaceChunkSize;

	// reserve virtual space for the IO space
	sIOSpaceBase = vm_allocate_early(args, sIOSpaceSize, 0, 0,
		ioSpaceChunkSize);
	if (sIOSpaceBase == 0) {
		panic("generic_vm_physical_page_mapper_init(): Failed to reserve IO "
			"space in virtual address space!");
		return B_ERROR;
	}

	*ioSpaceBase = sIOSpaceBase;

	// allocate some space to hold physical page mapping info
	// (a fixed table of 1024 descriptors, i.e. at most 1024 physical chunks)
	paddr_desc = (paddr_chunk_desc *)vm_allocate_early(args,
		sizeof(paddr_chunk_desc) * 1024, ~0L,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
	num_virtual_chunks = sIOSpaceSize / sIOSpaceChunkSize;
	virtual_pmappings = (paddr_chunk_desc **)vm_allocate_early(args,
		sizeof(paddr_chunk_desc *) * num_virtual_chunks, ~0L,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);

	TRACE(("paddr_desc %p, virtual_pmappings %p\n",
		paddr_desc, virtual_pmappings));

	// initialize our data structures
	memset(paddr_desc, 0, sizeof(paddr_chunk_desc) * 1024);
	memset(virtual_pmappings, 0,
		sizeof(paddr_chunk_desc *) * num_virtual_chunks);
	first_free_vmapping = 0;
	queue_init(&mapped_paddr_lru);
	sChunkAvailableSem = -1;

	TRACE(("generic_vm_physical_page_mapper_init: done\n"));

	return B_OK;
}


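/*!	Called once areas can be created: puts the early allocations under the
	control of the VM by creating areas for the descriptor arrays and a
	null area covering the reserved I/O space, and registers the "iospace"
	debugger command (DEBUG_IO_SPACE builds only).
*/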
status_t
generic_vm_physical_page_mapper_init_post_area(kernel_args *args)
{
	void *temp;

	TRACE(("generic_vm_physical_page_mapper_init_post_area: entry\n"));

	temp = (void *)paddr_desc;
	create_area("physical_page_mapping_descriptors", &temp, B_EXACT_ADDRESS,
		ROUNDUP(sizeof(paddr_chunk_desc) * 1024, B_PAGE_SIZE),
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	temp = (void *)virtual_pmappings;
	create_area("iospace_virtual_chunk_descriptors", &temp, B_EXACT_ADDRESS,
		ROUNDUP(sizeof(paddr_chunk_desc *) * num_virtual_chunks, B_PAGE_SIZE),
		B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	TRACE(("generic_vm_physical_page_mapper_init_post_area: creating iospace\n"));
	temp = (void *)sIOSpaceBase;
	area_id ioSpaceArea = vm_create_null_area(VMAddressSpace::KernelID(),
		"iospace", &temp, B_EXACT_ADDRESS, sIOSpaceSize,
		CREATE_AREA_PRIORITY_VIP);
	if (ioSpaceArea < 0) {
		panic("generic_vm_physical_page_mapper_init_post_area(): Failed to "
			"create null area for IO space!\n");
		return ioSpaceArea;
	}

	TRACE(("generic_vm_physical_page_mapper_init_post_area: done\n"));

#ifdef DEBUG_IO_SPACE
	add_debugger_command("iospace", &dump_iospace,
		"Shows info about the I/O space area.");
#endif

	return B_OK;
}


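/*!	Called once semaphores are available: creates the semaphore that
	generic_get_physical_page() blocks on when all virtual chunks are
	checked out.
*/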
status_t
generic_vm_physical_page_mapper_init_post_sem(kernel_args *args)
{
	sChunkAvailableSem = create_sem(1, "iospace chunk available");

	return sChunkAvailableSem >= B_OK ? B_OK : sChunkAvailableSem;
}