vm_glue.c revision 178272
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 178272 2008-04-17 04:20:10Z jeff $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0);

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL);

#ifndef NO_SWAPPING
static int swapout(struct proc *);
static void swapclear(struct proc *);
#endif

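/*
 * Set by kick_proc0() when the swapper (thread0) is already awake;
 * tells the scheduler() loop to rescan instead of going idle.
 */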
static volatile int proc0_rescan;

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
        void *addr;
        int len, rw;
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));

        if ((vm_offset_t)addr + len > kernel_map->max_offset ||
            (vm_offset_t)addr + len < (vm_offset_t)addr)
                return (FALSE);

        prot = rw;
        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);
        return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(addr, len, rw)
        void *addr;
        int len, rw;
{
        boolean_t rv;
        vm_prot_t prot;
        vm_map_t map;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));
        prot = rw;
        map = &curproc->p_vmspace->vm_map;
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
                return (FALSE);
        }
        vm_map_lock_read(map);
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        vm_map_unlock_read(map);
        return (rv == TRUE);
}

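/*
 * Wire down the pages backing the user address range [addr, addr + len)
 * in the current process so that they remain resident while the kernel
 * accesses them; vsunlock() is the counterpart.
 */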
int
vslock(void *addr, size_t len)
{
        vm_offset_t end, last, start;
        vm_size_t npages;
        int error;

        last = (vm_offset_t)addr + len;
        start = trunc_page((vm_offset_t)addr);
        end = round_page(last);
        if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
                return (EINVAL);
        npages = atop(end - start);
        if (npages > vm_page_max_wired)
                return (ENOMEM);
        PROC_LOCK(curproc);
        if (ptoa(npages +
            pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
            lim_cur(curproc, RLIMIT_MEMLOCK)) {
                PROC_UNLOCK(curproc);
                return (ENOMEM);
        }
        PROC_UNLOCK(curproc);
#if 0
        /*
         * XXX - not yet
         *
         * The limit for transient usage of wired pages should be
         * larger than for "permanent" wired pages (mlock()).
         *
         * Also, the sysctl code, which is the only present user
         * of vslock(), does a hard loop on EAGAIN.
         */
        if (npages + cnt.v_wire_count > vm_page_max_wired)
                return (EAGAIN);
#endif
        error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
        /*
         * Return EFAULT on error to match copy{in,out}() behaviour
         * rather than returning ENOMEM like mlock() would.
         */
        return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

        /* Rely on the parameter sanity checks performed by vslock(). */
        (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
            trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
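
/*
 * Usage sketch: the sysctl code, presently the only user of vslock(),
 * wires a userland buffer before copying data out (the names here are
 * hypothetical):
 *
 *	error = vslock(uaddr, len);
 *	if (error == 0) {
 *		error = copyout(kbuf, uaddr, len);
 *		vsunlock(uaddr, len);
 *	}
 */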

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
        vm_page_t m, ma[1];
        vm_pindex_t pindex;
        int rv;

        VM_OBJECT_LOCK(object);
        pindex = OFF_TO_IDX(offset);
        m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
        if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
                ma[0] = m;
                rv = vm_pager_get_pages(object, ma, 1, 0);
                m = vm_page_lookup(object, pindex);
                if (m == NULL)
                        goto out;
                if (m->valid == 0 || rv != VM_PAGER_OK) {
                        vm_page_lock_queues();
                        vm_page_free(m);
                        vm_page_unlock_queues();
                        m = NULL;
                        goto out;
                }
        }
        vm_page_lock_queues();
        vm_page_hold(m);
        vm_page_unlock_queues();
        vm_page_wakeup(m);
out:
        VM_OBJECT_UNLOCK(object);
        return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
        vm_page_t m;

        m = vm_imgact_hold_page(object, offset);
        if (m == NULL)
                return (NULL);
        sched_pin();
        return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
        vm_page_t m;

        m = sf_buf_page(sf);
        sf_buf_free(sf);
        sched_unpin();
        vm_page_lock_queues();
        vm_page_unhold(m);
        vm_page_unlock_queues();
}
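
/*
 * Usage sketch for the pair above (hypothetical image activator code):
 * map a page of the executable's object, copy through the sf_buf KVA,
 * then unmap:
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	error = copyout((caddr_t)sf_buf_kva(sf) + off, uaddr, len);
 *	vm_imgact_unmap_page(sf);
 */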

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects fork performance for a process and
 * creation performance for a thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        int i;

        /* Bounds check */
        if (pages <= 1)
                pages = KSTACK_PAGES;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;
        /*
         * Allocate an object for the kstack.
         */
        ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

        /*
         * Get a kernel virtual address for this thread's kstack.
         */
        ks = kmem_alloc_nofault(kernel_map,
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
        if (ks == 0) {
                printf("vm_thread_new: kstack allocation failed\n");
                vm_object_deallocate(ksobj);
                return (0);
        }

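        /*
         * Leave the guard pages unmapped so that a kernel stack overflow
         * faults immediately instead of silently corrupting whatever is
         * adjacent in the kernel address space.
         */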
        if (KSTACK_GUARD_PAGES != 0) {
                pmap_qremove(ks, KSTACK_GUARD_PAGES);
                ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
        }
        td->td_kstack_obj = ksobj;
        td->td_kstack = ks;
        /*
         * Knowing the number of pages allocated is useful when you
         * want to deallocate them.
         */
        td->td_kstack_pages = pages;
        /*
         * For the length of the stack, link in a real page of ram for each
         * page of stack.
         */
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                /*
                 * Get a kernel stack page.
                 */
                m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                ma[i] = m;
                m->valid = VM_PAGE_BITS_ALL;
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(ks, ma, pages);
        return (1);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m;
        int i, pages;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        ks = td->td_kstack;
        pmap_qremove(ks, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_dispose: kstack already missing?");
                vm_page_lock_queues();
                vm_page_unwire(m, 0);
                vm_page_free(m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        vm_object_deallocate(ksobj);
        kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
        td->td_kstack = 0;
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m;
        int i, pages;

        cpu_thread_swapout(td);
        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        pmap_qremove(td->td_kstack, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_swapout: kstack already missing?");
                vm_page_lock_queues();
                vm_page_dirty(m);
                vm_page_unwire(m, 0);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        int i, pages, rv;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (m->valid != VM_PAGE_BITS_ALL) {
                        rv = vm_pager_get_pages(ksobj, &m, 1, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_thread_swapin: cannot get kstack for proc: %d",
                                    td->td_proc->p_pid);
                        m = vm_page_lookup(ksobj, i);
                        m->valid = VM_PAGE_BITS_ALL;
                }
                ma[i] = m;
                vm_page_lock_queues();
                vm_page_wire(m);
                vm_page_unlock_queues();
                vm_page_wakeup(m);
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(td->td_kstack, ma, pages);
        cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
int
vm_thread_new_altkstack(struct thread *td, int pages)
{

        td->td_altkstack = td->td_kstack;
        td->td_altkstack_obj = td->td_kstack_obj;
        td->td_altkstack_pages = td->td_kstack_pages;

        return (vm_thread_new(td, pages));
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

        vm_thread_dispose(td);

        td->td_kstack = td->td_altkstack;
        td->td_kstack_obj = td->td_altkstack_obj;
        td->td_kstack_pages = td->td_altkstack_pages;
        td->td_altkstack = 0;
        td->td_altkstack_obj = NULL;
        td->td_altkstack_pages = 0;
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(td, p2, td2, vm2, flags)
        struct thread *td;
        struct proc *p2;
        struct thread *td2;
        struct vmspace *vm2;
        int flags;
{
        struct proc *p1 = td->td_proc;
        int error;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared; essentially
                 * this turns memory shared amongst threads into
                 * copy-on-write locally.
                 */
                if ((flags & RFMEM) == 0) {
                        if (p1->p_vmspace->vm_refcnt > 1) {
                                error = vmspace_unshare(p1);
                                if (error)
                                        return (error);
                        }
                }
                cpu_fork(td, p2, td2, flags);
                return (0);
        }

        if (flags & RFMEM) {
                p2->p_vmspace = p1->p_vmspace;
                atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
        }

        while (vm_page_count_severe()) {
                VM_WAIT;
        }

        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vm2;
                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);
        return (0);
}

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
        struct proc *p;
{

        vmspace_exitfree(p);            /* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
        void *udata;
{
        struct proc *p = udata;
        struct plimit *limp;
        int rss_limit;

        /*
         * Set up the initial limits on process VM. Set the maximum resident
         * set size to be half of (reasonably) available memory.  Since this
         * is a soft limit, it comes into effect only when the system is out
         * of memory - half of main memory helps to favor smaller processes,
         * and reduces thrashing of the object cache.
         */
        limp = p->p_limit;
        limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
        limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
        /* limit the limit to no less than 2MB */
        rss_limit = max(cnt.v_free_count, 512);
        limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
        limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

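/*
 * Bring a swapped-out process back into memory: swap in each thread's
 * kernel stack and clear the process's swapped-out state.  Called with
 * the process lock held.
 */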
void
faultin(p)
        struct proc *p;
{
#ifdef NO_SWAPPING

        PROC_LOCK_ASSERT(p, MA_OWNED);
        if ((p->p_flag & P_INMEM) == 0)
                panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        /*
         * If another process is swapping in this process,
         * just wait until it finishes.
         */
        if (p->p_flag & P_SWAPPINGIN) {
                while (p->p_flag & P_SWAPPINGIN)
                        msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
                return;
        }
        if ((p->p_flag & P_INMEM) == 0) {
                /*
                 * Don't let another thread swap process p out while we are
                 * busy swapping it in.
                 */
                ++p->p_lock;
                p->p_flag |= P_SWAPPINGIN;
                PROC_UNLOCK(p);

                /*
                 * We hold no lock here because the list of threads
                 * can not change while all threads in the process are
                 * swapped out.
                 */
                FOREACH_THREAD_IN_PROC(p, td)
                        vm_thread_swapin(td);
                PROC_LOCK(p);
                swapclear(p);
                p->p_swtick = ticks;

                wakeup(&p->p_flag);

                /* Allow other threads to swap p out now. */
                --p->p_lock;
        }
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is held on entry.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
        void *dummy;
{
        struct proc *p;
        struct thread *td;
        struct proc *pp;
        int slptime;
        int swtime;
        int ppri;
        int pri;

        mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
        mtx_unlock(&Giant);

loop:
        if (vm_page_count_min()) {
                VM_WAIT;
                thread_lock(&thread0);
                proc0_rescan = 0;
                thread_unlock(&thread0);
                goto loop;
        }

        pp = NULL;
        ppri = INT_MIN;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                PROC_LOCK(p);
                if (p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
                        PROC_UNLOCK(p);
                        continue;
                }
                swtime = (ticks - p->p_swtick) / hz;
                FOREACH_THREAD_IN_PROC(p, td) {
                        /*
                         * An otherwise runnable thread of a process
                         * swapped out has only the TDI_SWAPPED bit set.
                         */
                        thread_lock(td);
                        if (td->td_inhibitors == TDI_SWAPPED) {
                                slptime = (ticks - td->td_slptick) / hz;
                                pri = swtime + slptime;
                                if ((td->td_flags & TDF_SWAPINREQ) == 0)
                                        pri -= p->p_nice * 8;
                                /*
                                 * If this thread is higher priority
                                 * and there is enough space, then select
                                 * this process instead of the previous
                                 * selection.
                                 */
                                if (pri > ppri) {
                                        pp = p;
                                        ppri = pri;
                                }
                        }
                        thread_unlock(td);
                }
                PROC_UNLOCK(p);
        }
        sx_sunlock(&allproc_lock);

        /*
         * Nothing to do, back to sleep.
         */
        if ((p = pp) == NULL) {
                thread_lock(&thread0);
                if (!proc0_rescan) {
                        TD_SET_IWAIT(&thread0);
                        mi_switch(SW_VOL | SWT_IWAIT, NULL);
                }
                proc0_rescan = 0;
                thread_unlock(&thread0);
                goto loop;
        }
        PROC_LOCK(p);

        /*
         * Another process may be bringing or may have already
         * brought this process in while we traverse all threads.
         * Or, this process may even be being swapped out again.
         */
        if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
                PROC_UNLOCK(p);
                thread_lock(&thread0);
                proc0_rescan = 0;
                thread_unlock(&thread0);
                goto loop;
        }

        /*
         * We would like to bring someone in (only if there is space).
         * [What checks the space?]
         */
        faultin(p);
        PROC_UNLOCK(p);
        thread_lock(&thread0);
        proc0_rescan = 0;
        thread_unlock(&thread0);
        goto loop;
}

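/*
 * Wake the swapper (thread0) so that scheduler() rescans for a process
 * to swap in; if it is already running, just request a rescan.
 */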
void
kick_proc0(void)
{
        struct thread *td = &thread0;

        /* XXX This will probably cause a LOR in some cases */
        thread_lock(td);
        if (TD_AWAITING_INTR(td)) {
                CTR2(KTR_INTR, "%s: sched_add %d", __func__, 0);
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        } else {
                proc0_rescan = 1;
                CTR2(KTR_INTR, "%s: state %d",
                    __func__, td->td_state);
        }
        thread_unlock(td);
}

#ifndef NO_SWAPPING

/*
 * swap_idle_threshold1 is the guaranteed time (in seconds) a process
 * remains swapped in.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time (in seconds) a process can be idle
 * before it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");
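
/*
 * Both thresholds are runtime tunables; e.g. (sketch):
 *
 *	sysctl vm.swap_idle_threshold1=5
 *	sysctl vm.swap_idle_threshold2=20
 */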

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and swap out their stacks.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
        int action;
{
        struct proc *p;
        struct thread *td;
        int didswap = 0;

retry:
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct vmspace *vm;
                int minslptime = 100000;
                int slptime;

                /*
                 * Watch out for a process in
                 * creation.  It may have no
                 * address space or lock yet.
                 */
                if (p->p_state == PRS_NEW)
                        continue;
                /*
                 * An aio daemon switches its
                 * address space while running.
                 * Perform a quick check whether
                 * a process has P_SYSTEM.
                 */
                if ((p->p_flag & P_SYSTEM) != 0)
                        continue;
                /*
                 * Do not swapout a process that
                 * is waiting for VM data
                 * structures as there is a possible
                 * deadlock.  Test this first as
                 * this may block.
                 *
                 * Lock the map until swapout
                 * finishes, or a thread of this
                 * process may attempt to alter
                 * the map.
                 */
                vm = vmspace_acquire_ref(p);
                if (vm == NULL)
                        continue;
                if (!vm_map_trylock(&vm->vm_map))
                        goto nextproc1;

                PROC_LOCK(p);
                if (p->p_lock != 0 ||
                    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
                    ) != 0) {
                        goto nextproc;
                }
                /*
                 * Only aiod changes its vmspace; however, it will be
                 * skipped because of the P_SYSTEM check above.
                 */
                if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
                        goto nextproc;

                switch (p->p_state) {
                default:
                        /*
                         * Don't swap out processes in any sort
                         * of 'special' state.
                         */
                        break;

                case PRS_NORMAL:
                        /*
                         * Do not swap out a realtime process.
                         * Check all of its threads.
                         */
                        FOREACH_THREAD_IN_PROC(p, td) {
                                thread_lock(td);
                                if (PRI_IS_REALTIME(td->td_pri_class)) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }
                                slptime = (ticks - td->td_slptick) / hz;
                                /*
                                 * Guarantee swap_idle_threshold1
                                 * time in memory.
                                 */
                                if (slptime < swap_idle_threshold1) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }

                                /*
                                 * Do not swapout a process if it is
                                 * waiting on a critical event of some
                                 * kind or there is a thread whose
                                 * pageable memory may be accessed.
                                 *
                                 * This could be refined to support
                                 * swapping out a thread.
                                 */
                                if (!thread_safetoswapout(td)) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }
                                /*
                                 * If the system is under memory stress,
                                 * or if we are swapping
                                 * idle processes >= swap_idle_threshold2,
                                 * then swap the process out.
                                 */
                                if (((action & VM_SWAP_NORMAL) == 0) &&
                                    (((action & VM_SWAP_IDLE) == 0) ||
                                    (slptime < swap_idle_threshold2))) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }

                                if (minslptime > slptime)
                                        minslptime = slptime;
                                thread_unlock(td);
                        }

                        /*
                         * If the pageout daemon didn't free enough pages,
                         * or if this process is idle and the system is
                         * configured to swap proactively, swap it out.
                         */
                        if ((action & VM_SWAP_NORMAL) ||
                                ((action & VM_SWAP_IDLE) &&
                                 (minslptime > swap_idle_threshold2))) {
                                if (swapout(p) == 0)
                                        didswap++;
                                PROC_UNLOCK(p);
                                vm_map_unlock(&vm->vm_map);
                                vmspace_free(vm);
                                sx_sunlock(&allproc_lock);
                                goto retry;
                        }
                }
nextproc:
                PROC_UNLOCK(p);
                vm_map_unlock(&vm->vm_map);
nextproc1:
                vmspace_free(vm);
                continue;
        }
        sx_sunlock(&allproc_lock);
        /*
         * If we swapped something out, and another process needed memory,
         * then wakeup the sched process.
         */
        if (didswap)
                wakeup(&proc0);
}

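/*
 * Mark a process as fully swapped in: restore each thread's in-memory
 * state and make runnable threads eligible for scheduling again.
 */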
static void
swapclear(p)
        struct proc *p;
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);

        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                td->td_flags |= TDF_INMEM;
                td->td_flags &= ~TDF_SWAPINREQ;
                TD_CLR_SWAPPED(td);
                if (TD_CAN_RUN(td))
                        setrunnable(td);
                thread_unlock(td);
        }
        p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
        p->p_flag |= P_INMEM;
}

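/*
 * Swap out a single process: mark its threads as swapped and unwire the
 * pages backing their kernel stacks so that they may be paged out.
 * Returns 0 on success or EBUSY if a thread cannot be swapped out now.
 */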
static int
swapout(p)
        struct proc *p;
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
        printf("swapping out %d\n", p->p_pid);
#endif

        /*
         * The states of this process and its threads may have changed
         * by now.  Assuming that there is only one pageout daemon thread,
         * this process should still be in memory.
         */
        KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
                ("swapout: lost a swapout race?"));

        /*
         * remember the process resident count
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
        /*
         * Check and mark all threads before we proceed.
         */
        p->p_flag &= ~P_INMEM;
        p->p_flag |= P_SWAPPINGOUT;
        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                if (!thread_safetoswapout(td)) {
                        thread_unlock(td);
                        swapclear(p);
                        return (EBUSY);
                }
                td->td_flags &= ~TDF_INMEM;
                TD_SET_SWAPPED(td);
                thread_unlock(td);
        }
        td = FIRST_THREAD_IN_PROC(p);
        ++td->td_ru.ru_nswap;
        PROC_UNLOCK(p);

        /*
         * This list is stable because all threads are now prevented from
         * running.  The list is only modified in the context of a running
         * thread in this process.
         */
        FOREACH_THREAD_IN_PROC(p, td)
                vm_thread_swapout(td);

        PROC_LOCK(p);
        p->p_flag &= ~P_SWAPPINGOUT;
        p->p_swtick = ticks;
        return (0);
}
#endif /* !NO_SWAPPING */
