vm_machdep.c revision 72746
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $FreeBSD: head/sys/powerpc/aim/vm_machdep.c 72746 2001-02-20 05:26:15Z jhb $
 */
/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mutex.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/fpu.h>
#include <machine/md_var.h>
#include <machine/prom.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

/*
 * Quick version of vm_fault: fault in a single user page by touching
 * it.  fubyte() forces a read fault; writing the byte back with
 * subyte() additionally forces a write fault without changing the data.
 */
int
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
	int r;
	if (prot & VM_PROT_WRITE)
		r = subyte(v, fubyte(v));
	else
		r = fubyte(v);
	return(r);
}
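
/*
 * Illustrative caller sketch (not part of this file; cf. vmapbuf()
 * below): before wiring a user buffer for I/O, each page is faulted
 * in with the protection the transfer needs, e.g.
 *	vm_fault_quick(addr, VM_PROT_READ | VM_PROT_WRITE);
 */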

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the kernel stack so that the
 * child is ready to run and return to user mode.
 */
void
cpu_fork(p1, p2, flags)
	register struct proc *p1, *p2;
	int flags;
{
	if ((flags & RFPROC) == 0)
		return;

	p2->p_md.md_tf = p1->p_md.md_tf;
	p2->p_md.md_flags = p1->p_md.md_flags & (MDP_FPUSED | MDP_UAC_MASK);

	/*
	 * Cache the physical address of the pcb, so we can
	 * swap to it easily.
	 */
	p2->p_md.md_pcbpaddr = (void*)vtophys((vm_offset_t)&p2->p_addr->u_pcb);

	/*
	 * Copy floating point state from the FP chip to the PCB
	 * if this process has state stored there.
	 */
	alpha_fpstate_save(p1, 0);

	/*
	 * Copy the pcb from proc p1 to p2.  The child's kernel stack is
	 * created from scratch further below, so only the pcb itself is
	 * copied here; the stack and pcb need to agree.  Make sure that
	 * the new process has FEN disabled.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	p2->p_addr->u_pcb.pcb_hw.apcb_usp = alpha_pal_rdusp();
	p2->p_addr->u_pcb.pcb_hw.apcb_flags &= ~ALPHA_PCB_FLAGS_FEN;

	/*
	 * Set the floating point state.
	 */
	if ((p2->p_addr->u_pcb.pcb_fp_control & IEEE_INHERIT) == 0) {
		p2->p_addr->u_pcb.pcb_fp_control = 0;
		p2->p_addr->u_pcb.pcb_fp.fpr_cr = (FPCR_DYN_NORMAL
						   | FPCR_INVD | FPCR_DZED
						   | FPCR_OVFD | FPCR_INED
						   | FPCR_UNFD);
	}

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
#ifdef DIAGNOSTIC
	if (p1 != curproc)
		panic("cpu_fork: curproc");
	alpha_fpstate_check(p1);
#endif

	/*
	 * create the child's kernel stack, from scratch.
	 */
	{
		struct user *up = p2->p_addr;
		struct trapframe *p2tf;

		/*
		 * Pick a stack pointer, leaving room for a trapframe;
		 * copy trapframe from parent so return to user mode
		 * will be to right address, with correct registers.
		 */
		p2tf = p2->p_md.md_tf = (struct trapframe *)
		    ((char *)p2->p_addr + USPACE - sizeof(struct trapframe));
		bcopy(p1->p_md.md_tf, p2->p_md.md_tf,
		    sizeof(struct trapframe));

		/*
		 * Set up return-value registers as fork() libc stub expects.
		 */
		p2tf->tf_regs[FRAME_V0] = 0; 	/* child's pid (linux) 	*/
		p2tf->tf_regs[FRAME_A3] = 0;	/* no error 		*/
		p2tf->tf_regs[FRAME_A4] = 1;	/* is child (FreeBSD) 	*/
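
		/*
		 * Illustrative sketch only (hedged): the userland fork()
		 * stub roughly turns these registers into its return
		 * value as
		 *	v0 = syscall(SYS_fork);    v0 = pid, a4 = is-child
		 *	return (a4 != 0) ? 0 : v0;
		 * so the child sees fork() return 0 with no error.
		 */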

		/*
		 * Arrange for continuation at fork_return(), which
		 * will return to exception_return().  Note that the child
		 * process doesn't stay in the kernel for long!
		 *
		 * This is an inlined version of cpu_set_kpc.
		 */
		up->u_pcb.pcb_hw.apcb_ksp = (u_int64_t)p2tf;
		up->u_pcb.pcb_context[0] =
		    (u_int64_t)fork_return;		/* s0: a0 */
		up->u_pcb.pcb_context[1] =
		    (u_int64_t)exception_return;	/* s1: ra */
		up->u_pcb.pcb_context[2] = (u_long) p2;	/* s2: a1 */
		up->u_pcb.pcb_context[7] =
		    (u_int64_t)switch_trampoline;	/* ra: assembly magic */
	}
}
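
/*
 * Rough sketch (illustrative; see switch_trampoline in swtch.s for the
 * authoritative code) of how the context set up above is consumed the
 * first time the child is switched to:
 *
 *	mov	s0, pv		# pcb_context[0]: fork_return
 *	mov	s1, ra		# pcb_context[1]: exception_return
 *	mov	s2, a0		# pcb_context[2]: argument p2
 *	jmp	zero, (pv)	# call fork_return(p2, frame)
 */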

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(p, func, arg)
	struct proc *p;
	void (*func) __P((void *));
	void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	p->p_addr->u_pcb.pcb_context[0] = (u_long) func;
	p->p_addr->u_pcb.pcb_context[2] = (u_long) arg;
}
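
/*
 * Illustrative use (hedged; cf. kthread_create() in kern/kern_kthread.c):
 * after forking a kernel thread, point its entry at the thread function
 * so it never returns to user mode:
 *	error = fork1(&proc0, RFMEM | RFFDG | RFPROC, &p2);
 *	cpu_set_fork_handler(p2, (void (*)(void *))func, arg);
 */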

/*
 * cpu_exit is called as the last action during exit.
 * We drop the process's floating point state, acquire sched_lock and
 * release Giant, mark the process a zombie, wake its parent, and
 * switch away for the last time; cpu_switch() does not return here.
 */
void
cpu_exit(p)
	register struct proc *p;
{
	alpha_fpstate_drop(p);

	mtx_lock_spin(&sched_lock);
	mtx_unlock_flags(&Giant, MTX_NOSWITCH);
	mtx_assert(&Giant, MA_NOTOWNED);

	/*
	 * We have to wait until after releasing all locks before
	 * changing p_stat.  If we block on a mutex then we will be
	 * back at SRUN when we resume and our parent will never
	 * harvest us.
	 */
	p->p_stat = SZOMB;

	mp_fixme("assumption: p_pptr won't change at this time");
	wakeup(p->p_pptr);

	cnt.v_swtch++;
	cpu_switch();
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
	/* drop per-process resources */
	pmap_dispose_proc(p);

	/* and clean-out the vmspace */
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
	    p));
}

#ifdef notyet
static void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
#endif

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr, v, kva;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE, v += PAGE_SIZE) {
		/*
		 * Do the vm_fault if needed; do the copy-on-write thing
		 * when reading stuff off device into memory.
		 */
		vm_fault_quick(addr,
			(bp->b_iocmd == BIO_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		if (pa == 0)
			panic("vmapbuf: page not present");
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
		pmap_kenter((vm_offset_t) v, pa);
	}

	kva = bp->b_saveaddr;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_data.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE) {
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		pmap_kremove((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}

	bp->b_data = bp->b_saveaddr;
}
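
/*
 * Illustrative pairing (hedged; cf. physio() in kern/kern_physio.c):
 * raw-device I/O brackets each transfer roughly as
 *	vmapbuf(bp);
 *	(issue the transfer and sleep until it completes);
 *	vunmapbuf(bp);
 */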

/*
 * Reset back to firmware.
 */
void
cpu_reset()
{
	prom_halt(0);
}

int
grow_stack(p, sp)
	struct proc *p;
	size_t sp;
{
	int rv;

	rv = vm_map_growstack (p, sp);
	if (rv != KERN_SUCCESS)
		return (0);

	return (1);
}
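
/*
 * Illustrative caller (hedged; cf. the page-fault path in trap.c):
 * on a fault in the user stack region the handler first tries
 *	if (!grow_stack(p, (size_t)va))
 *		goto nogo;
 * before passing the address to vm_fault().
 */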

static int cnt_prezero;

SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */

#define ZIDLE_LO(v)    ((v) * 2 / 3)
#define ZIDLE_HI(v)    ((v) * 4 / 5)

int
vm_page_zero_idle()
{
	static int free_rover;
	static int zero_state;
	vm_page_t m;
	int s;

	/*
	 * Attempt to maintain approximately 1/2 of our free pages in a
	 * PG_ZERO'd state.   Add some hysteresis to (attempt to) avoid
	 * generally zeroing a page when the system is near steady-state.
	 * Otherwise we might get 'flutter' during disk I/O / IPC or
	 * fast sleeps.  We also do not want to be continuously zeroing
	 * pages because doing so may flush our L1 and L2 caches too much.
	 */
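	/*
	 * Worked example (illustrative): with cnt.v_free_count == 3000,
	 * ZIDLE_LO gives 2000 and ZIDLE_HI gives 2400, so we zero pages
	 * until 2400 are pre-zeroed, then stay idle until the count
	 * falls below 2000 before starting again.
	 */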

	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
		return(0);
	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
		return(0);

	if (mtx_trylock(&Giant)) {
		s = splvm();
		m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
		zero_state = 0;
		if (m != NULL && (m->flags & PG_ZERO) == 0) {
			vm_page_queues[m->queue].lcnt--;
			TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
			m->queue = PQ_NONE;
			splx(s);
#if 0
			rel_mplock();
#endif
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
#if 0
			get_mplock();
#endif
			(void)splvm();
			vm_page_flag_set(m, PG_ZERO);
			m->queue = PQ_FREE + m->pc;
			vm_page_queues[m->queue].lcnt++;
			TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
			    pageq);
			++vm_page_zero_count;
			++cnt_prezero;
			if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
				zero_state = 1;
		}
		free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
		splx(s);
		mtx_unlock(&Giant);
		return (1);
	}
	return (0);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(addr)
	vm_offset_t addr;
{
	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}