/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 116355 2003-06-14 23:23:55Z alc $");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}

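/*
 * Illustrative sketch only (kept out of compilation by #if 0): a
 * hypothetical caller validating a kernel buffer before touching it.
 * "buf" and "len" are made-up names, not part of this file.  Note that
 * the trunc_page()/round_page() pair above widens the check to whole
 * pages.
 */
#if 0
	if (!kernacc(buf, len, VM_PROT_READ | VM_PROT_WRITE))
		return (EFAULT);
	bzero(buf, len);		/* map entries permit the write */
#endif
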
/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	return (rv == TRUE);
}

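/*
 * Illustrative sketch only: a hypothetical caller probing a user
 * buffer.  As the warning above notes, a TRUE result does not
 * guarantee a later access cannot fault, so copyin()/copyout() must
 * still be prepared to fail.  "uaddr", "kbuf" and "len" are made-up
 * names.
 */
#if 0
	if (!useracc(uaddr, len, VM_PROT_READ))
		return (EFAULT);
	error = copyin(uaddr, kbuf, len);	/* may still return EFAULT */
#endif
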
/*
 * MPSAFE
 */
void
vslock(addr, len)
	void *addr;
	u_int len;
{

	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * MPSAFE
 */
void
vsunlock(addr, len)
	void *addr;
	u_int len;
{

	vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

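/*
 * Illustrative sketch only: the usual vslock()/vsunlock() pairing,
 * wiring a user buffer across an operation that must not take page
 * faults (a physio-style transfer, say).  "uaddr", "len" and
 * do_transfer() are made up for the sketch.
 */
#if 0
	vslock(uaddr, len);
	error = do_transfer(uaddr, len);	/* runs against wired pages */
	vsunlock(uaddr, len);
#endif
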
/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Allocate object for the upage.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;

		vm_page_lock_queues();
		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Swap in the UAREAs of all processes swapped out to the given device.
 * The pages in the UAREA are marked dirty and their swap metadata is freed.
 */
void
vm_proc_swapin_all(int devidx)
{
	struct proc *p;
	vm_object_t object;
	vm_page_t m;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		object = p->p_upages_obj;
		if (object != NULL) {
			VM_OBJECT_LOCK(object);
			if (swap_pager_isswapped(object, devidx)) {
				VM_OBJECT_UNLOCK(object);
				sx_sunlock(&allproc_lock);
				faultin(p);
				PROC_UNLOCK(p);
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				TAILQ_FOREACH(m, &object->memq, listq)
					vm_page_dirty(m);
				vm_page_unlock_queues();
				swap_pager_freespace(object, 0,
				    object->un_pager.swp.swp_bcount);
				VM_OBJECT_UNLOCK(object);
				goto retry;
			}
			VM_OBJECT_UNLOCK(object);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}
#endif

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and
 * thread creation performance.
 */
void
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;
	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
	td->td_kstack_obj = ksobj;
	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	   (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0)
		panic("vm_thread_new: kstack allocation failed");
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wakeup(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
}

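/*
 * Resulting kernel virtual layout (a sketch, assuming
 * KSTACK_GUARD_PAGES == 1; the guard count is machine-dependent):
 *
 *	allocation base:	guard page(s), mapping removed by
 *				pmap_qremove() above, so running off the
 *				bottom of the stack faults instead of
 *				silently corrupting adjacent memory
 *	td_kstack:		"pages" wired pages mapped by pmap_qenter()
 */
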
/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	int i, pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

#ifdef	__alpha
	/*
	 * Make sure we aren't fpcurthread.
	 */
	alpha_fpstate_save(td, 1);
#endif
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d", td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
#ifdef	__alpha
	/*
	 * The pcb may be at a different physical address now so cache the
	 * new address.
	 */
	td->td_md.md_pcbpaddr = (void *)vtophys((vm_offset_t)td->td_pcb);
#endif
}

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

	td->td_altkstack = td->td_kstack;
	td->td_altkstack_obj = td->td_kstack_obj;
	td->td_altkstack_pages = td->td_kstack_pages;

	vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

	vm_thread_dispose(td);

	td->td_kstack = td->td_altkstack;
	td->td_kstack_obj = td->td_altkstack_obj;
	td->td_kstack_pages = td->td_altkstack_pages;
	td->td_altkstack = 0;
	td->td_altkstack_obj = NULL;
	td->td_altkstack_pages = 0;
}

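/*
 * Illustrative sketch only: the expected alternate-kstack lifecycle.
 * A caller installs a larger temporary kstack and later restores the
 * original; "td" and the page count 4 are example values.
 */
#if 0
	vm_thread_new_altkstack(td, 4);		/* stash kstack, create new */
	/* ... work that needs the larger stack ... */
	vm_thread_dispose_altkstack(td);	/* free it, restore original */
#endif
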
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;
	MPASS(p2->p_sigacts != NULL);

	/*
	 * p_stats currently points at fields in the user struct
	 * but not at &u, instead at p_addr. Copy parts of
	 * p_stats; zero the rest of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after the process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

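/*
 * Worked example of the RSS floor above, assuming 4KB pages (not true
 * of every platform): rss_limit = max(cnt.v_free_count, 512) is in
 * pages, so ptoa(512) = 512 * 4096 bytes = 2MB is the smallest
 * possible initial soft limit.
 */
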
void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_sflag & PS_SWAPPINGIN)
		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
	else if ((p->p_sflag & PS_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		mtx_lock_spin(&sched_lock);
		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td) {
			TD_CLR_SWAPPED(td);
			if (TD_CAN_RUN(td))
				setrunnable(td);
		}
		mtx_unlock_spin(&sched_lock);

		wakeup(&p->p_sflag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

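/*
 * Illustrative sketch only: faultin() must be entered with the proc
 * lock held, as its callers do (see scheduler() below and, with
 * swapping enabled, vm_proc_swapin_all() above).
 */
#if 0
	PROC_LOCK(p);
	faultin(p);
	PROC_UNLOCK(p);
#endif
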
/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - a process is ranked by its highest-priority thread.
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * if this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in. (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

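/*
 * Worked example of the selection key computed in scheduler() above
 * (illustrative numbers): a process swapped out for 10 seconds
 * (p_swtime) whose ksegrp has slept 5 seconds (kg_slptime) with nice 4
 * and no PS_SWAPINREQ pending scores pri = 10 + 5 - 4 * 8 = -17, so a
 * niced process is brought back in later than a neutral one with the
 * same times.
 */
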
#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

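/*
 * Both thresholds are run-time tunable; e.g., with illustrative
 * values:
 *
 *	sysctl vm.swap_idle_threshold1=5
 *	sysctl vm.swap_idle_threshold2=30
 */
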
/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	int didswap = 0;

	GIANT_REQUIRED;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}
		mtx_unlock_spin(&sched_lock);

		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;

		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		PROC_LOCK(p);
		vm = p->p_vmspace;
		KASSERT(vm != NULL,
			("swapout_procs: a process has no address space"));
		++vm->vm_refcnt;
		PROC_UNLOCK(p);
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * only aiod changes vmspace, however it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM
		 */
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc2;

		switch (p->p_state) {
		default:
			/* Don't swap out processes in any sort
			 * of 'special' state. */
			break;

		case PRS_NORMAL:
			mtx_lock_spin(&sched_lock);
			/*
			 * do not swapout a realtime process
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the process has been asleep for a while and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
				((action & VM_SWAP_IDLE) &&
				 (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
nextproc:
			mtx_unlock_spin(&sched_lock);
		}
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
		("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
			("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPINGOUT;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)
		TD_SET_SWAPPED(td);
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPINGOUT;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */