vm_glue.c revision 92666
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_glue.c 92666 2002-03-19 11:02:06Z peter $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits __P((void *));
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler __P((void *));
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout __P((struct proc *));
#endif

int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}
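
/*
 * Usage sketch (hypothetical caller, not from this file): code that is
 * about to dereference an arbitrary kernel address can probe it first:
 *
 *	if (!kernacc(addr, sizeof(int), VM_PROT_READ))
 *		return (EFAULT);
 *
 * The check is advisory only; the kernel map can change between the
 * check and the access.
 */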

int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;

	GIANT_REQUIRED;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    prot);
	return (rv == TRUE);
}
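
/*
 * Usage sketch (hypothetical; uaddr/ulen are illustrative names): a
 * driver validating and then wiring a user buffer before raw I/O:
 *
 *	if (!useracc(uaddr, ulen, VM_PROT_WRITE))
 *		return (EFAULT);
 *	vslock(uaddr, ulen);
 *	... do the transfer ...
 *	vsunlock(uaddr, ulen);
 *
 * See vslock()/vsunlock() below.
 */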

void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	GIANT_REQUIRED;
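	/*
	 * Wire the range: the final FALSE is vm_map_pageable()'s
	 * new_pageable argument, so the pages become non-pageable.
	 */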
	vm_map_pageable(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

void
vsunlock(addr, len)
	caddr_t addr;
	u_int len;
{
	GIANT_REQUIRED;
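	/*
	 * Unwire the range: new_pageable == TRUE makes the pages
	 * pageable again.
	 */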
	vm_map_pageable(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared: essentially
		 * this turns memory shared amongst threads into
		 * memory COW'd locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

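	/*
	 * Throttle forking under severe page shortage: VM_WAIT sleeps
	 * until the pagedaemon has recovered some pages.
	 */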
	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	pmap_new_proc(p2);
	pmap_new_thread(td2);		/* Initial thread */

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;

	/*
	 * p_stats currently points at fields within the user struct,
	 * reached via p_addr rather than via &u.  Copy some parts of
	 * p_stats and zero the rest of it (the statistics).
	 *
	 * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we don't
	 * need to share sigacts, so we use up->u_sigacts instead.
	 */
	p2->p_stats = &up->u_stats;
	if (p2->p_sigacts == NULL) {
		if (p2->p_procsig->ps_refcnt != 1)
			printf("PID:%d NULL sigacts with refcnt not 1!\n",
			    p2->p_pid);
		p2->p_sigacts = &up->u_sigacts;
		up->u_sigacts = *p1->p_sigacts;
	}

	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}
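
/*
 * Summary of the rfork(2)-style flag handling above (illustrative):
 *
 *	RFPROC | RFMEM	new process sharing the parent's vmspace
 *	RFPROC		classical fork(): COW copy via vmspace_fork()
 *	neither		no new process; a clear RFMEM unshares the
 *			caller's own vmspace (vmspace_unshare())
 */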

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{
	struct thread *td;

	GIANT_REQUIRED;
	cpu_wait(p);
	pmap_dispose_proc(p);		/* drop per-process resources */
	FOREACH_THREAD_IN_PROC(p, td)
		pmap_dispose_thread(td);
	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB (512 pages with 4KB pages) */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
	struct thread *td;
	GIANT_REQUIRED;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_lock_spin(&sched_lock);
	if ((p->p_sflag & PS_INMEM) == 0) {
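		/*
		 * Hold the process: while p_lock is nonzero,
		 * swapout_procs() will skip this process.
		 */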
		++p->p_lock;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		pmap_swapin_proc(p);
		FOREACH_THREAD_IN_PROC (p, td)
			pmap_swapin_thread(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC (p, td)
			if (td->td_proc->p_stat == SRUN)	/* XXXKSE */
				setrunqueue(td);

		p->p_sflag |= PS_INMEM;

		/* undo the hold we placed on p_lock above */
		--p->p_lock;
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 *  XXXKSE - KSEGRP with highest priority counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
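/*
 * Scoring sketch (illustrative numbers): a process swapped out for
 * 10 seconds whose ksegrp has slept 5 seconds with kg_nice -10 and no
 * PS_SWAPINREQ pending scores 10 + 5 - (-10 * 8) = 95; the loop below
 * keeps the candidate with the largest such score.
 */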
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		mtx_lock_spin(&sched_lock);
		if (p->p_stat == SRUN
		&& (p->p_sflag & (PS_INMEM | PS_SWAPPING)) == 0) {
			/* Find the highest-scoring ksegrp in the process */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * if this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in (only if there is space).
	 */
	PROC_LOCK(p);
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed time (in seconds) that a process
 * remains swapped in before idle swapping may consider it.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "");
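
/*
 * Both thresholds are run-time tunable via sysctl(8), e.g. (values in
 * seconds; illustrative settings):
 *
 *	sysctl vm.swap_idle_threshold1=5
 *	sysctl vm.swap_idle_threshold2=30
 */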

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct ksegrp *kg;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	GIANT_REQUIRED;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		struct vmspace *vm;
		int minslptime = 100000;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_TRACED|P_SYSTEM|P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * Only aiod changes vmspace; however, it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM.
		 */
		vm = p->p_vmspace;
		mtx_lock_spin(&sched_lock);
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPING)) != PS_INMEM) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			continue;
		}

		switch (p->p_stat) {
		default:
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * Do not swapout a realtime process;
			 * check all of its thread groups.
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class)) {
					mtx_unlock_spin(&sched_lock);
					PROC_UNLOCK(p);
					goto nextproc;
				}

				/*
				 * Do not swapout a process waiting
				 * on a critical event of some kind.
				 * Also guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (((FIRST_THREAD_IN_PROC(p)->td_priority) < PSOCK) ||
				    (kg->kg_slptime < swap_idle_threshold1)) {
					mtx_unlock_spin(&sched_lock);
					PROC_UNLOCK(p);
					goto nextproc;
				}

				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2))) {
					mtx_unlock_spin(&sched_lock);
					PROC_UNLOCK(p);
					goto nextproc;
				}
				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			mtx_unlock_spin(&sched_lock);
			++vm->vm_refcnt;
			/*
			 * Do not swapout a process that is waiting
			 * for VM data structures, as there is a
			 * possible deadlock.
			 */
			if (lockmgr(&vm->vm_map.lock,
					LK_EXCLUSIVE | LK_NOWAIT,
					NULL, curthread)) {
				vmspace_free(vm);
				PROC_UNLOCK(p);
				goto nextproc;
			}
			vm_map_unlock(&vm->vm_map);
			/*
			 * If the process has been asleep for a while and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
				((action & VM_SWAP_IDLE) &&
				 (minslptime > swap_idle_threshold2))) {
				sx_sunlock(&allproc_lock);
				swapout(p);
				vmspace_free(vm);
				didswap++;
				goto retry;
			}
			PROC_UNLOCK(p);
			vmspace_free(vm);
		}
nextproc:
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process (it sleeps on &proc0 in
	 * scheduler() above).
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif
	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPING;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC (p, td)
		if (td->td_proc->p_stat == SRUN)	/* XXXKSE */
			remrunqueue(td);	/* XXXKSE */
	mtx_unlock_spin(&sched_lock);

	pmap_swapout_proc(p);
	FOREACH_THREAD_IN_PROC(p, td)
		pmap_swapout_thread(td);

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPING;
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
}
#endif /* !NO_SWAPPING */