pmap_dispatch.c revision 190681
1/*-
2 * Copyright (c) 2005 Peter Grehan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/powerpc/powerpc/pmap_dispatch.c 190681 2009-04-04 00:22:44Z nwhitehorn $");
30
31/*
32 * Dispatch MI pmap calls to the appropriate MMU implementation
33 * through a previously registered kernel object.
34 *
35 * Before pmap_bootstrap() can be called, a CPU module must have
36 * called pmap_mmu_install(). This may be called multiple times:
37 * the highest priority call will be installed as the default
38 * MMU handler when pmap_bootstrap() is called.
39 *
40 * It is required that mutex_init() be called before pmap_bootstrap(),
41 * as the PMAP layer makes extensive use of mutexes.
42 */
43
44#include <sys/param.h>
45#include <sys/kernel.h>
46#include <sys/lock.h>
47#include <sys/ktr.h>
48#include <sys/mutex.h>
49#include <sys/systm.h>
50
51#include <vm/vm.h>
52#include <vm/vm_page.h>
53
54#include <machine/mmuvar.h>
55#include <machine/smp.h>
56
57#include "mmu_if.h"
58
59static mmu_def_t	*mmu_def_impl;
60static mmu_t		mmu_obj;
61static struct mmu_kobj	mmu_kernel_obj;
62static struct kobj_ops	mmu_kernel_kops;
63
64/*
65 * pmap globals
66 */
67struct pmap kernel_pmap_store;
68
69struct msgbuf *msgbufp;
70vm_offset_t    msgbuf_phys;
71
72vm_offset_t kernel_vm_end;
73vm_offset_t phys_avail[PHYS_AVAIL_SZ];
74vm_offset_t virtual_avail;
75vm_offset_t virtual_end;
76
77int pmap_bootstrapped;
78
/*
 * Change the wired attribute of the mapping for va in pmap: trace via
 * KTR and dispatch to the installed MMU implementation.
 */
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %u)", __func__, pmap, va, wired);
	MMU_CHANGE_WIRING(mmu_obj, pmap, va, wired);
}
86
/* Dispatch pmap_clear_modify() to the installed MMU implementation. */
void
pmap_clear_modify(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_CLEAR_MODIFY(mmu_obj, m);
}
94
/* Dispatch pmap_clear_reference() to the installed MMU implementation. */
void
pmap_clear_reference(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_CLEAR_REFERENCE(mmu_obj, m);
}
102
/*
 * Copy a range of mappings from src_pmap to dst_pmap: trace via KTR and
 * dispatch to the installed MMU implementation.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

	CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
	    src_pmap, dst_addr, len, src_addr);
	MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
}
112
/* Dispatch pmap_copy_page() to the installed MMU implementation. */
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
	MMU_COPY_PAGE(mmu_obj, src, dst);
}
120
/*
 * Enter a mapping for page p at va in pmap, dispatched to the installed
 * MMU implementation.  The function name is spelled out in the format
 * string because CTR6 (the widest KTR variant) already carries six
 * arguments, leaving no slot for __func__.  Note that 'access' is traced
 * but not forwarded to MMU_ENTER at this revision.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t p,
    vm_prot_t prot, boolean_t wired)
{

	CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %#x, %p, %#x, %u)", pmap, va,
	    access, p, prot, wired);
	MMU_ENTER(mmu_obj, pmap, va, p, prot, wired);
}
130
/*
 * Enter mappings for a run of pages beginning at m_start: trace via KTR
 * and dispatch to the installed MMU implementation.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
	    end, m_start, prot);
	MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
}
140
/* Dispatch pmap_enter_quick() to the installed MMU implementation. */
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
	MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
}
148
/*
 * Return the physical address mapped at va in pmap, as reported by the
 * installed MMU implementation.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_EXTRACT(mmu_obj, pmap, va));
}
156
/*
 * Look up and hold the page mapped at va in pmap; dispatched to the
 * installed MMU implementation.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
	return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
}
164
/* Dispatch pmap_growkernel() to the installed MMU implementation. */
void
pmap_growkernel(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	MMU_GROWKERNEL(mmu_obj, va);
}
172
/* Dispatch pmap_init() to the installed MMU implementation. */
void
pmap_init(void)
{

	CTR1(KTR_PMAP, "%s()", __func__);
	MMU_INIT(mmu_obj);
}
180
/* Dispatch pmap_is_modified() to the installed MMU implementation. */
boolean_t
pmap_is_modified(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_IS_MODIFIED(mmu_obj, m));
}
188
/* Dispatch pmap_is_prefaultable() to the installed MMU implementation. */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
}
196
/*
 * Test-and-clear the referenced bit(s) for m via the installed MMU
 * implementation.  NOTE(review): declared boolean_t here, while the MI
 * pmap interface conventionally returns a reference count (int) —
 * confirm against the pmap.h prototype for this revision.
 */
boolean_t
pmap_ts_referenced(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_TS_REFERENCED(mmu_obj, m));
}
204
/*
 * Map the physical range [start, end) and return the mapped virtual
 * address; dispatched to the installed MMU implementation.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
	    prot);
	return (MMU_MAP(mmu_obj, virt, start, end, prot));
}
213
/* Dispatch pmap_object_init_pt() to the installed MMU implementation. */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
	    object, pindex, size);
	MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
}
223
/* Dispatch pmap_page_exists_quick() to the installed MMU implementation. */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
	return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
}
231
/* Dispatch pmap_page_init() to the installed MMU implementation. */
void
pmap_page_init(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_PAGE_INIT(mmu_obj, m);
}
239
/* Dispatch pmap_page_wired_mappings() to the installed MMU implementation. */
int
pmap_page_wired_mappings(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
}
247
/*
 * Initialize a pmap via the installed MMU implementation.  Always
 * reports success (1); MMU_PINIT has no failure return here.
 */
int
pmap_pinit(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT(mmu_obj, pmap);
	return (1);
}
256
/* Dispatch pmap_pinit0() to the installed MMU implementation. */
void
pmap_pinit0(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT0(mmu_obj, pmap);
}
264
/*
 * Apply protection prot to the range [start, end) in pmap; dispatched
 * to the installed MMU implementation.
 */
void
pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, start, end,
	    prot);
	MMU_PROTECT(mmu_obj, pmap, start, end, prot);
}
273
/* Dispatch pmap_qenter() to the installed MMU implementation. */
void
pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
{

	CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
	MMU_QENTER(mmu_obj, start, m, count);
}
281
/* Dispatch pmap_qremove() to the installed MMU implementation. */
void
pmap_qremove(vm_offset_t start, int count)
{

	CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
	MMU_QREMOVE(mmu_obj, start, count);
}
289
/* Dispatch pmap_release() to the installed MMU implementation. */
void
pmap_release(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_RELEASE(mmu_obj, pmap);
}
297
/* Dispatch pmap_remove() to the installed MMU implementation. */
void
pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
	MMU_REMOVE(mmu_obj, pmap, start, end);
}
305
/* Dispatch pmap_remove_all() to the installed MMU implementation. */
void
pmap_remove_all(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_ALL(mmu_obj, m);
}
313
/* Dispatch pmap_remove_pages() to the installed MMU implementation. */
void
pmap_remove_pages(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_REMOVE_PAGES(mmu_obj, pmap);
}
321
/* Dispatch pmap_remove_write() to the installed MMU implementation. */
void
pmap_remove_write(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_WRITE(mmu_obj, m);
}
329
/* Dispatch pmap_zero_page() to the installed MMU implementation. */
void
pmap_zero_page(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_ZERO_PAGE(mmu_obj, m);
}
337
/* Dispatch pmap_zero_page_area() to the installed MMU implementation. */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
	MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
}
345
/* Dispatch pmap_zero_page_idle() to the installed MMU implementation. */
void
pmap_zero_page_idle(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_ZERO_PAGE_IDLE(mmu_obj, m);
}
353
/* Dispatch pmap_mincore() to the installed MMU implementation. */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
	return (MMU_MINCORE(mmu_obj, pmap, addr));
}
361
/* Dispatch pmap_activate() to the installed MMU implementation. */
void
pmap_activate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_ACTIVATE(mmu_obj, td);
}
369
/* Dispatch pmap_deactivate() to the installed MMU implementation. */
void
pmap_deactivate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_DEACTIVATE(mmu_obj, td);
}
377
378/*
379 *	Increase the starting virtual address of the given mapping if a
380 *	different alignment might result in more superpage mappings.
381 */
/* Dispatch pmap_align_superpage() to the installed MMU implementation. */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
	    size);
	MMU_ALIGN_SUPERPAGE(mmu_obj, object, offset, addr, size);
}
391
392/*
393 * Routines used in machine-dependent code
394 */
/*
 * Bootstrap the pmap layer.  Requires that pmap_mmu_install() has
 * already been called to select mmu_def_impl (see the file header
 * comment); that class is compiled into a kobj ops table and the
 * kernel MMU object initialized before the MMU's own bootstrap runs.
 */
void
pmap_bootstrap(vm_offset_t start, vm_offset_t end)
{
	mmu_obj = &mmu_kernel_obj;

	/*
	 * Take care of compiling the selected class, and
	 * then statically initialise the MMU object
	 */
	kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
	kobj_init((kobj_t)mmu_obj, mmu_def_impl);

	MMU_BOOTSTRAP(mmu_obj, start, end);
}
409
410void
411pmap_cpu_bootstrap(int ap)
412{
413	/*
414	 * No KTR here because our console probably doesn't work yet
415	 */
416
417	return (MMU_CPU_BOOTSTRAP(mmu_obj, ap));
418}
419
/*
 * Map a device memory range and return a KVA pointer for it;
 * dispatched to the installed MMU implementation.
 */
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_MAPDEV(mmu_obj, pa, size));
}
427
/* Dispatch pmap_unmapdev() to the installed MMU implementation. */
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
	MMU_UNMAPDEV(mmu_obj, va, size);
}
435
/*
 * Extract the physical address backing a kernel virtual address;
 * dispatched to the installed MMU implementation.
 */
vm_offset_t
pmap_kextract(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	return (MMU_KEXTRACT(mmu_obj, va));
}
443
/* Dispatch pmap_kenter() to the installed MMU implementation. */
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
	MMU_KENTER(mmu_obj, va, pa);
}
451
/* Dispatch pmap_dev_direct_mapped() to the installed MMU implementation. */
boolean_t
pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
}
459
/* Dispatch pmap_page_executable() to the installed MMU implementation. */
boolean_t
pmap_page_executable(vm_page_t pg)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pg);
	return (MMU_PAGE_EXECUTABLE(mmu_obj, pg));
}
467
468/*
469 * MMU install routines. Highest priority wins, equal priority also
470 * overrides allowing last-set to win.
471 */
472SET_DECLARE(mmu_set, mmu_def_t);
473
474boolean_t
475pmap_mmu_install(char *name, int prio)
476{
477	mmu_def_t	**mmupp, *mmup;
478	static int	curr_prio = 0;
479
480	/*
481	 * Try and locate the MMU kobj corresponding to the name
482	 */
483	SET_FOREACH(mmupp, mmu_set) {
484		mmup = *mmupp;
485
486		if (mmup->name &&
487		    !strcmp(mmup->name, name) &&
488		    prio >= curr_prio) {
489			curr_prio = prio;
490			mmu_def_impl = mmup;
491			return (TRUE);
492		}
493	}
494
495	return (FALSE);
496}
497