/*-
 * Copyright (c) 2005 Peter Grehan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Dispatch MI pmap calls to the appropriate MMU implementation
 * through a previously registered kernel object.
 *
 * Before pmap_bootstrap() can be called, a CPU module must have
 * called pmap_mmu_install(). This may be called multiple times:
 * the highest priority call will be installed as the default
 * MMU handler when pmap_bootstrap() is called.
 *
 * It is required that mutex_init() be called before pmap_bootstrap(),
 * as the PMAP layer makes extensive use of mutexes.
 */
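
/*
 * Registration sketch (illustrative, not part of this file): early
 * platform code would typically select an implementation with a call
 * along the lines of
 *
 *	pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
 *
 * The MMU type name and priority constant shown here are assumptions
 * that depend on the CPU module; see pmap_mmu_install() below for the
 * selection logic.
 */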

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>
#include <machine/smp.h>

#include "mmu_if.h"
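
/*
 * mmu_if.h is generated at build time from the mmu_if.m interface
 * description and supplies the MMU_*() kobj method wrappers used
 * below; each wrapper looks up the method in the kobj ops cache and
 * dispatches to the installed implementation.
 */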

static mmu_def_t	*mmu_def_impl;
static mmu_t		mmu_obj;
static struct mmu_kobj	mmu_kernel_obj;
static struct kobj_ops	mmu_kernel_kops;
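
/*
 * The kernel MMU kobj and its ops table are statically allocated and
 * are initialised with the kobj *_static routines in pmap_bootstrap()
 * below, since they are needed long before the kernel memory
 * allocators are available.
 */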

/*
 * pmap globals
 */
struct pmap kernel_pmap_store;

struct msgbuf *msgbufp;
vm_offset_t    msgbuf_phys;

vm_offset_t kernel_vm_end;
vm_offset_t phys_avail[PHYS_AVAIL_SZ];
vm_offset_t virtual_avail;
vm_offset_t virtual_end;

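/*
 * Non-zero once bootstrap has completed (assumption: the installed
 * MMU implementation is expected to set this from its bootstrap
 * method).
 */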
int pmap_bootstrapped;

void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %u)", __func__, pmap, va, wired);
	MMU_CHANGE_WIRING(mmu_obj, pmap, va, wired);
}

void
pmap_clear_modify(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_CLEAR_MODIFY(mmu_obj, m);
}

void
pmap_clear_reference(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_CLEAR_REFERENCE(mmu_obj, m);
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

	CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
	    src_pmap, dst_addr, len, src_addr);
	MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
	MMU_COPY_PAGE(mmu_obj, src, dst);
}

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
	    a_offset, mb, b_offset, xfersize);
	MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
}

void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t p,
    vm_prot_t prot, boolean_t wired)
{

	CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %#x, %p, %#x, %u)", pmap, va,
	    access, p, prot, wired);
	MMU_ENTER(mmu_obj, pmap, va, p, prot, wired);
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
	    end, m_start, prot);
	MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
	MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_EXTRACT(mmu_obj, pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
	return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
}

void
pmap_growkernel(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	MMU_GROWKERNEL(mmu_obj, va);
}

void
pmap_init(void)
{

	CTR1(KTR_PMAP, "%s()", __func__);
	MMU_INIT(mmu_obj);
}

boolean_t
pmap_is_modified(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_IS_MODIFIED(mmu_obj, m));
}

boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
}

boolean_t
pmap_is_referenced(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_IS_REFERENCED(mmu_obj, m));
}

int
pmap_ts_referenced(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_TS_REFERENCED(mmu_obj, m));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
	    prot);
	return (MMU_MAP(mmu_obj, virt, start, end, prot));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
	    object, pindex, size);
	MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
}

boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
	return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
}

void
pmap_page_init(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_PAGE_INIT(mmu_obj, m);
}

int
pmap_page_wired_mappings(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
}

int
pmap_pinit(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT(mmu_obj, pmap);
	return (1);
}

void
pmap_pinit0(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT0(mmu_obj, pmap);
}

void
pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, start, end,
	    prot);
	MMU_PROTECT(mmu_obj, pmap, start, end, prot);
}

void
pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
{

	CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
	MMU_QENTER(mmu_obj, start, m, count);
}

void
pmap_qremove(vm_offset_t start, int count)
{

	CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
	MMU_QREMOVE(mmu_obj, start, count);
}

void
pmap_release(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_RELEASE(mmu_obj, pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
	MMU_REMOVE(mmu_obj, pmap, start, end);
}

void
pmap_remove_all(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_ALL(mmu_obj, m);
}

void
pmap_remove_pages(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_REMOVE_PAGES(mmu_obj, pmap);
}

void
pmap_remove_write(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_WRITE(mmu_obj, m);
}

void
pmap_zero_page(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_ZERO_PAGE(mmu_obj, m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
	MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
}

void
pmap_zero_page_idle(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_ZERO_PAGE_IDLE(mmu_obj, m);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
	return (MMU_MINCORE(mmu_obj, pmap, addr, locked_pa));
}

void
pmap_activate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_ACTIVATE(mmu_obj, td);
}

void
pmap_deactivate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_DEACTIVATE(mmu_obj, td);
}

/*
 *	Increase the starting virtual address of the given mapping if a
 *	different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
	    size);
	MMU_ALIGN_SUPERPAGE(mmu_obj, object, offset, addr, size);
}

/*
 * Routines used in machine-dependent code
 */
void
pmap_bootstrap(vm_offset_t start, vm_offset_t end)
{
	mmu_obj = &mmu_kernel_obj;

	/*
	 * Take care of compiling the selected class, and
	 * then statically initialise the MMU object
	 */
	kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
	kobj_init_static((kobj_t)mmu_obj, mmu_def_impl);

	MMU_BOOTSTRAP(mmu_obj, start, end);
}

void
pmap_cpu_bootstrap(int ap)
{
	/*
	 * No KTR here because our console probably doesn't work yet
	 */

	MMU_CPU_BOOTSTRAP(mmu_obj, ap);
}

void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_MAPDEV(mmu_obj, pa, size));
}

void *
pmap_mapdev_attr(vm_offset_t pa, vm_size_t size, vm_memattr_t attr)
{

	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, pa, size, attr);
	return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
	MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma);
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
	MMU_UNMAPDEV(mmu_obj, va, size);
}

vm_offset_t
pmap_kextract(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	return (MMU_KEXTRACT(mmu_obj, va));
}

void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
	MMU_KENTER(mmu_obj, va, pa);
}

void
pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{

	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, va, pa, ma);
	MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
}

boolean_t
pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
	MMU_SYNC_ICACHE(mmu_obj, pm, va, sz);
}

vm_offset_t
pmap_dumpsys_map(struct pmap_md *md, vm_size_t ofs, vm_size_t *sz)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, md, ofs, *sz);
	return (MMU_DUMPSYS_MAP(mmu_obj, md, ofs, sz));
}

void
pmap_dumpsys_unmap(struct pmap_md *md, vm_size_t ofs, vm_offset_t va)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, md, ofs, va);
	MMU_DUMPSYS_UNMAP(mmu_obj, md, ofs, va);
}

struct pmap_md *
pmap_scan_md(struct pmap_md *prev)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, prev);
	return (MMU_SCAN_MD(mmu_obj, prev));
}

/*
 * MMU install routines. The highest priority wins; an equal priority
 * also overrides, so the implementation installed last wins ties.
 */
SET_DECLARE(mmu_set, mmu_def_t);

boolean_t
pmap_mmu_install(char *name, int prio)
{
	mmu_def_t	**mmupp, *mmup;
	static int	curr_prio = 0;

	/*
	 * Try to locate the MMU kobj corresponding to the name.
	 */
	SET_FOREACH(mmupp, mmu_set) {
		mmup = *mmupp;

		if (mmup->name &&
		    !strcmp(mmup->name, name) &&
		    (prio >= curr_prio || mmu_def_impl == NULL)) {
			curr_prio = prio;
			mmu_def_impl = mmup;
			return (TRUE);
		}
	}

	return (FALSE);
}
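
/*
 * Illustrative sketch: an implementation makes itself visible to the
 * search above by declaring itself into the mmu_set linker set with
 * the MMU_DEF() macro from machine/mmuvar.h, along the lines of
 *
 *	MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);
 *
 * The identifier, type string, and methods table shown here are
 * assumptions; the real names live in the individual MMU modules.
 */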

int unmapped_buf_allowed;