vm_glue.c revision 83366
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_glue.c 83366 2001-09-12 08:38:13Z julian $
 */

#include "opt_rlimit.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */

static void vm_init_limits __P((void *));
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler __P((void *));
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)


static void swapout __P((struct proc *));

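/*
 * kernacc:
 *
 *	Check whether the kernel can access "len" bytes of memory starting
 *	at "addr" with the protection given by "rw" (a VM_PROT_* mask).
 *	The range is rounded out to page boundaries and checked against
 *	the kernel map.  Returns non-zero if the access is permitted.
 */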
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

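/*
 * useracc:
 *
 *	Check whether the current process can access "len" bytes of its own
 *	address space starting at "addr" with the protection given by "rw".
 *	Ranges extending beyond VM_MAXUSER_ADDRESS, or that wrap around,
 *	are rejected outright; otherwise the check is made against the
 *	process's vm_map.  Returns non-zero if the access is permitted.
 */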
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;
	vm_map_entry_t save_hint;

	GIANT_REQUIRED;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);
	/*
	 * We save the map hint, and restore it.  Useracc appears to distort
	 * the map hint unnecessarily.
	 */
	save_hint = map->hint;
	rv = vm_map_check_protection(map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len), prot);
	map->hint = save_hint;
	vm_map_unlock_read(map);

	return (rv == TRUE);
}

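/*
 * vslock:
 *
 *	Wire the pages covering the range [addr, addr + len) in the current
 *	process's map so they stay resident while the kernel uses them.
 */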
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	GIANT_REQUIRED;
	vm_map_pageable(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

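/*
 * vsunlock:
 *
 *	Undo a previous vslock(): unwire the pages covering the range
 *	[addr, addr + len) so that they may be paged out again.
 */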
void
vsunlock(addr, len)
	caddr_t addr;
	u_int len;
{
	GIANT_REQUIRED;
	vm_map_pageable(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, flags)
	struct thread *td;
	struct proc *p2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared: this essentially
		 * turns memory shared amongst threads into memory that
		 * is COWed locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	pmap_new_proc(p2);
	pmap_new_thread(&p2->p_thread);		/* Initial thread */

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;

	/*
	 * p_stats currently points at fields in the user struct
	 * but not at &u, instead at p_addr.  Copy parts of
	 * p_stats; zero the rest of p_stats (statistics).
	 *
	 * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we don't need
	 * to share sigacts, so we use the up->u_sigacts.
	 */
	p2->p_stats = &up->u_stats;
	if (p2->p_sigacts == NULL) {
		if (p2->p_procsig->ps_refcnt != 1)
			printf("PID:%d NULL sigacts with refcnt not 1!\n", p2->p_pid);
		p2->p_sigacts = &up->u_sigacts;
		up->u_sigacts = *p1->p_sigacts;
	}

	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, flags);
}

/*
 * Called after a process has been wait(2)ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{
	struct thread *td;

	GIANT_REQUIRED;
	cpu_wait(p);
	pmap_dispose_proc(p);		/* drop per-process resources */
	FOREACH_THREAD_IN_PROC(p, td)
		pmap_dispose_thread(td);
	vmspace_free(p->p_vmspace);	/* and clean out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

/*
 * Bring a swapped-out process back into memory.
 * Must be called with the proc struct's mutex held.
 */
void
faultin(p)
	struct proc *p;
{
	struct thread *td;

	GIANT_REQUIRED;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_lock_spin(&sched_lock);
	if ((p->p_sflag & PS_INMEM) == 0) {
		++p->p_lock;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		pmap_swapin_proc(p);
		FOREACH_THREAD_IN_PROC (p, td)
			pmap_swapin_thread(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC (p, td)
			if (td->td_proc->p_stat == SRUN)	/* XXXKSE */
				setrunqueue(td);

		p->p_sflag |= PS_INMEM;

		/* undo the effect of bumping p_lock above */
		--p->p_lock;
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 *  XXXKSE - KSEGRP with highest priority counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		mtx_lock_spin(&sched_lock);
		if (p->p_stat == SRUN
		&& (p->p_sflag & (PS_INMEM | PS_SWAPPING)) == 0) {
			/* Find the minimum sleeptime for the process */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * if this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in (only if there is space).
	 */
	PROC_LOCK(p);
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "");

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct ksegrp *kg;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	GIANT_REQUIRED;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		struct vmspace *vm;
		int minslptime = 100000;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_TRACED|P_SYSTEM|P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * Only aiod changes vmspace; however, it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM.
		 */
		vm = p->p_vmspace;
		mtx_lock_spin(&sched_lock);
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPING)) != PS_INMEM) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			continue;
		}

		switch (p->p_stat) {
		default:
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * Do not swap out a realtime process.
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri.pri_class)) {
					mtx_unlock_spin(&sched_lock);
					PROC_UNLOCK(p);
					goto nextproc;
				}

				/*
				 * Do not swap out a process waiting
				 * on a critical event of some kind.
				 * Also guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (((kg->kg_pri.pri_level) < PSOCK) ||
				    (kg->kg_slptime < swap_idle_threshold1)) {
					mtx_unlock_spin(&sched_lock);
					PROC_UNLOCK(p);
					goto nextproc;
				}

				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2))) {
					mtx_unlock_spin(&sched_lock);
					PROC_UNLOCK(p);
					goto nextproc;
				}
				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			mtx_unlock_spin(&sched_lock);
			++vm->vm_refcnt;
			/*
			 * Do not swap out a process that is waiting
			 * for VM data structures; there is a
			 * possible deadlock.
			 */
			if (lockmgr(&vm->vm_map.lock,
					LK_EXCLUSIVE | LK_NOWAIT,
					NULL, curthread)) {
				vmspace_free(vm);
				PROC_UNLOCK(p);
				goto nextproc;
			}
			vm_map_unlock(&vm->vm_map);
			/*
			 * If the process has been asleep for a while and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
				((action & VM_SWAP_IDLE) &&
				 (minslptime > swap_idle_threshold2))) {
				sx_sunlock(&allproc_lock);
				swapout(p);
				vmspace_free(vm);
				didswap++;
				goto retry;
			}
			PROC_UNLOCK(p);
			vmspace_free(vm);
		}
nextproc:
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wake up the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

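/*
 * swapout:
 *
 *	Mark a process as swapped out: charge the swap to its rusage,
 *	record its resident set size, clear PS_INMEM, take any runnable
 *	threads off the run queue, and release the wired U-area and kernel
 *	stack pages via pmap_swapout_proc()/pmap_swapout_thread().
 *	The proc lock must be held on entry; it is dropped in here.
 */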
static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif
	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPING;
	PROC_UNLOCK_NOSWITCH(p);
	FOREACH_THREAD_IN_PROC (p, td)
		if (td->td_proc->p_stat == SRUN)	/* XXXKSE */
			remrunqueue(td);	/* XXXKSE */
	mtx_unlock_spin(&sched_lock);

	pmap_swapout_proc(p);
	FOREACH_THREAD_IN_PROC(p, td)
		pmap_swapout_thread(td);

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPING;
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
}
#endif /* !NO_SWAPPING */
