vm_glue.c revision 198341
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 198341 2009-10-21 18:38:02Z marcel $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0);

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL);

#ifndef NO_SWAPPING
static int swapout(struct proc *);
static void swapclear(struct proc *);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}
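
/*
 * Example (an illustrative sketch, not part of the original file): a
 * driver might use useracc() as a cheap sanity check on a user buffer
 * before a later copyout().  The names uap->buf, uap->len, and kbuf
 * are hypothetical:
 *
 *	if (!useracc(uap->buf, uap->len, VM_PROT_WRITE))
 *		return (EFAULT);
 *	error = copyout(kbuf, uap->buf, uap->len);
 *
 * Per the warning above, useracc() validates only the vm_map_entry
 * range; the copyout() can still fault and its error must be checked.
 */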

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	PROC_LOCK(curproc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
	    lim_cur(curproc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(curproc);
		return (ENOMEM);
	}
	PROC_UNLOCK(curproc);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
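
/*
 * Example (an illustrative sketch): the sysctl code, the one present
 * consumer of vslock()/vsunlock(), wires the userland "old" buffer
 * around the handler call in roughly this shape; the names oldaddr,
 * oldlen, and the handler call are paraphrased, not quoted:
 *
 *	error = vslock(oldaddr, oldlen);
 *	if (error != 0)
 *		return (error);
 *	error = sysctl_root(...);
 *	vsunlock(oldaddr, oldlen);
 *
 * Wiring lets the handler write to the buffer from contexts where
 * taking a page fault would be unsafe.
 */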

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m, ma[1];
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_LOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		ma[0] = m;
		rv = vm_pager_get_pages(object, ma, 1, 0);
		m = vm_page_lookup(object, pindex);
		if (m == NULL)
			goto out;
		if (rv != VM_PAGER_OK) {
			vm_page_lock_queues();
			vm_page_free(m);
			vm_page_unlock_queues();
			m = NULL;
			goto out;
		}
	}
	vm_page_lock_queues();
	vm_page_hold(m);
	vm_page_unlock_queues();
	vm_page_wakeup(m);
out:
	VM_OBJECT_UNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock_queues();
	vm_page_unhold(m);
	vm_page_unlock_queues();
}
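
/*
 * Example (an illustrative sketch): an image activator can use the
 * pair above to read an executable's page without mapping it into the
 * process.  The names object, offset, pageoff, dst, and len are
 * hypothetical:
 *
 *	struct sf_buf *sf;
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	bcopy((char *)sf_buf_kva(sf) + pageoff, dst, len);
 *	vm_imgact_unmap_page(sf);
 *
 * The mapping is CPU-private, so the thread remains pinned to its CPU
 * between the map and unmap calls.
 */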

void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

struct kstack_cache_entry {
	vm_object_t ksobj;
	struct kstack_cache_entry *next_ks_entry;
};
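
/*
 * Freed stacks are kept on a singly-linked free list.  Note that a
 * kstack_cache_entry is stored in the first bytes of a cached stack's
 * own, still-mapped KVA, so the cache needs no separate allocation:
 * vm_thread_dispose() overlays the entry on the dead stack, and
 * vm_thread_new() later hands the same address back out as td_kstack.
 */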

static struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "Size of the kernel stack cache");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "Number of kernel stacks allocated");

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects fork performance for a process and
 * creation performance for a thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	struct kstack_cache_entry *ks_ce;
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	if (pages == KSTACK_PAGES) {
		mtx_lock(&kstack_cache_mtx);
		if (kstack_cache != NULL) {
			ks_ce = kstack_cache;
			kstack_cache = ks_ce->next_ks_entry;
			mtx_unlock(&kstack_cache_mtx);

			td->td_kstack_obj = ks_ce->ksobj;
			td->td_kstack = (vm_offset_t)ks_ce;
			td->td_kstack_pages = KSTACK_PAGES;
			return (1);
		}
		mtx_unlock(&kstack_cache_mtx);
	}

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	   (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	atomic_add_int(&kstacks, 1);
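	/*
	 * The guard pages occupy the bottom of the allocated range:
	 * they are left unmapped so that a kernel stack overflow
	 * faults immediately instead of silently overwriting whatever
	 * sits below the stack.  td_kstack points just above the
	 * guard region.
	 */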
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of RAM for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}

static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	atomic_add_int(&kstacks, -1);
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_stack_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	struct kstack_cache_entry *ks_ce;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
		ks_ce = (struct kstack_cache_entry *)ks;
		ks_ce->ksobj = ksobj;
		mtx_lock(&kstack_cache_mtx);
		ks_ce->next_ks_entry = kstack_cache;
		kstack_cache = ks_ce;
		mtx_unlock(&kstack_cache_mtx);
		return;
	}
	vm_thread_stack_dispose(ksobj, ks, pages);
}

static void
vm_thread_stack_lowmem(void *nulll)
{
	struct kstack_cache_entry *ks_ce, *ks_ce1;

	mtx_lock(&kstack_cache_mtx);
	ks_ce = kstack_cache;
	kstack_cache = NULL;
	mtx_unlock(&kstack_cache_mtx);

	while (ks_ce != NULL) {
		ks_ce1 = ks_ce;
		ks_ce = ks_ce->next_ks_entry;

		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
		    KSTACK_PAGES);
	}
}

static void
kstack_cache_init(void *nulll)
{

	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
	    EVENTHANDLER_PRI_ANY);
}

MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
		}
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_unlock_queues();
		vm_page_wakeup(m);
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially
		 * this changes memory shared amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}
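
/*
 * For reference (paraphrased from kern_fork.c, not part of this file):
 * fork(2) reaches here with RFFDG | RFPROC, so the child gets a copied
 * address space, while vfork(2) adds RFMEM | RFPPWAIT, so parent and
 * child share a vmspace until the child execs or exits.
 */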

/*
 * Called after a process has been wait(2)ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(void *udata)
{
	struct proc *p = udata;
	struct plimit *limp;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	limp = p->p_limit;
	limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(struct proc *p)
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}
	if ((p->p_flag & P_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);

		/*
		 * We hold no lock here because the list of threads
		 * can not change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);
		PROC_LOCK(p);
		swapclear(p);
		p->p_swtick = ticks;

		wakeup(&p->p_flag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is held on entry.
 */
/* ARGSUSED*/
static void
scheduler(void *dummy)
{
	struct proc *p;
	struct thread *td;
	struct proc *pp;
	int slptime;
	int swtime;
	int ppri;
	int pri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(&Giant);

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * If this thread is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	/*
	 * We would like to bring someone in (only if there is space).
	 * [What checks the space?]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	goto loop;
}

void
kick_proc0(void)
{

	wakeup(&proc0);
}

#ifndef NO_SWAPPING

/*
 * swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");
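
/*
 * Example (illustrative only): an administrator who wants idle
 * processes swapped out more aggressively might tune these knobs
 * from userland, e.g.:
 *
 *	sysctl vm.swap_idle_enabled=1
 *	sysctl vm.swap_idle_threshold2=30
 *
 * vm.swap_idle_enabled itself is defined in vm_pageout.c; only the
 * two thresholds live in this file.
 */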

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and swap out their stacks.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(int action)
{
	struct proc *p;
	struct thread *td;
	int didswap = 0;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;
		int slptime;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		if (p->p_state == PRS_NEW)
			continue;
		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;
		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL)
			continue;
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc;
		}
		/*
		 * Only the aio daemon changes the vmspace; it will be
		 * skipped anyway because of the P_SYSTEM check above.
		 */
		if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
			goto nextproc;

		switch (p->p_state) {
		default:
			/*
			 * Don't swap out processes in any sort
			 * of 'special' state.
			 */
			break;

		case PRS_NORMAL:
			/*
			 * Do not swap out a realtime process.
			 * Check all the threads.
			 */
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (PRI_IS_REALTIME(td->td_pri_class)) {
					thread_unlock(td);
					goto nextproc;
				}
				slptime = (ticks - td->td_slptick) / hz;
				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (slptime < swap_idle_threshold1) {
					thread_unlock(td);
					goto nextproc;
				}

				/*
				 * Do not swap out a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				if (!thread_safetoswapout(td)) {
					thread_unlock(td);
					goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (slptime < swap_idle_threshold2))) {
					thread_unlock(td);
					goto nextproc;
				}

				if (minslptime > slptime)
					minslptime = slptime;
				thread_unlock(td);
			}

			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
				((action & VM_SWAP_IDLE) &&
				 (minslptime > swap_idle_threshold2))) {
				if (swapout(p) == 0)
					didswap++;
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
		}
nextproc:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapclear(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td))
			if (setrunnable(td)) {
#ifdef INVARIANTS
				/*
				 * XXX: We just cleared TDI_SWAPPED
				 * above and set TDF_INMEM, so this
				 * should never happen.
				 */
				panic("not waking up swapper");
#endif
			}
		thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

static int
swapout(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
		("swapout: lost a swapout race?"));

	/*
	 * Remember the process resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	p->p_swtick = ticks;
	return (0);
}
#endif /* !NO_SWAPPING */
