vm_glue.c revision 206823
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 206823 2010-04-19 00:18:14Z alc $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

extern int maxslp;

/*
 * System initialization
 *
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL);

#ifndef NO_SWAPPING
static int swapout(struct proc *);
static void swapclear(struct proc *);
static void vm_thread_swapin(struct thread *td);
static void vm_thread_swapout(struct thread *td);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or the copyin()/copyout()/su*()/fu*() functions should
 * be used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}
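
/*
 * Illustrative sketch only ("uaddr", "kbuf", and "len" are hypothetical):
 *
 *	if (!useracc(uaddr, len, VM_PROT_READ))
 *		return (EFAULT);
 *	error = copyin(uaddr, kbuf, len);
 *
 * Note that the useracc() check alone is advisory: the mapping can change
 * before the access occurs, so the copyin() error must still be checked.
 */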

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	PROC_LOCK(curproc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
	    lim_cur(curproc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(curproc);
		return (ENOMEM);
	}
	PROC_UNLOCK(curproc);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
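
/*
 * Typical pairing (sketch; everything except vslock()/vsunlock() is
 * hypothetical), as in a sysctl-style handler that must not fault on the
 * user buffer during the operation:
 *
 *	error = vslock(uaddr, ulen);
 *	if (error != 0)
 *		return (error);
 *	error = copyout_locked_data(uaddr, ulen);
 *	vsunlock(uaddr, ulen);
 *	return (error);
 */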

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m, ma[1];
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_LOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		ma[0] = m;
		rv = vm_pager_get_pages(object, ma, 1, 0);
		m = vm_page_lookup(object, pindex);
		if (m == NULL)
			goto out;
		if (rv != VM_PAGER_OK) {
			vm_page_lock_queues();
			vm_page_free(m);
			vm_page_unlock_queues();
			m = NULL;
			goto out;
		}
	}
	vm_page_lock_queues();
	vm_page_hold(m);
	vm_page_unlock_queues();
	vm_page_wakeup(m);
out:
	VM_OBJECT_UNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock_queues();
	vm_page_unhold(m);
	vm_page_unlock_queues();
}
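
/*
 * Sketch of the calling pattern used by image activators ("object",
 * "offset", and "dst" are assumed to be supplied by the caller):
 *
 *	struct sf_buf *sf;
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	bcopy((void *)sf_buf_kva(sf), dst, PAGE_SIZE);
 *	vm_imgact_unmap_page(sf);
 */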

void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

struct kstack_cache_entry {
	vm_object_t ksobj;
	struct kstack_cache_entry *next_ks_entry;
};

static struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "Maximum number of kernel stacks cached for reuse");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "Number of kernel stacks currently allocated");
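
/*
 * The cache size can be tuned at runtime, e.g. from userland:
 *
 *	sysctl vm.kstack_cache_size=256
 */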

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects fork performance for a process and
 * creation performance for a thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	struct kstack_cache_entry *ks_ce;
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	if (pages == KSTACK_PAGES) {
		mtx_lock(&kstack_cache_mtx);
		if (kstack_cache != NULL) {
			ks_ce = kstack_cache;
			kstack_cache = ks_ce->next_ks_entry;
			mtx_unlock(&kstack_cache_mtx);

			td->td_kstack_obj = ks_ce->ksobj;
			td->td_kstack = (vm_offset_t)ks_ce;
			td->td_kstack_pages = KSTACK_PAGES;
			return (1);
		}
		mtx_unlock(&kstack_cache_mtx);
	}

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
#if defined(__mips__)
	/*
	 * We need to align the kstack's mapped address to fit within
	 * a single TLB entry.
	 */
	ks = kmem_alloc_nofault_space(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, VMFS_TLB_ALIGNED_SPACE);
#else
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	atomic_add_int(&kstacks, 1);
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}

static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	atomic_add_int(&kstacks, -1);
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_stack_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	struct kstack_cache_entry *ks_ce;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
		ks_ce = (struct kstack_cache_entry *)ks;
		ks_ce->ksobj = ksobj;
		mtx_lock(&kstack_cache_mtx);
		ks_ce->next_ks_entry = kstack_cache;
		kstack_cache = ks_ce;
		mtx_unlock(&kstack_cache_mtx);
		return;
	}
	vm_thread_stack_dispose(ksobj, ks, pages);
}

static void
vm_thread_stack_lowmem(void *nulll)
{
	struct kstack_cache_entry *ks_ce, *ks_ce1;

	mtx_lock(&kstack_cache_mtx);
	ks_ce = kstack_cache;
	kstack_cache = NULL;
	mtx_unlock(&kstack_cache_mtx);

	while (ks_ce != NULL) {
		ks_ce1 = ks_ce;
		ks_ce = ks_ce->next_ks_entry;

		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
		    KSTACK_PAGES);
	}
}

static void
kstack_cache_init(void *nulll)
{

	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
	    EVENTHANDLER_PRI_ANY);
}

MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifndef NO_SWAPPING
/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_dirty(m);
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
static void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
		    VM_ALLOC_WIRED);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
		}
		ma[i] = m;
		vm_page_wakeup(m);
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}
#endif /* !NO_SWAPPING */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
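/*
 * For example, vfork(2) reaches this function with RFPROC | RFMEM set
 * (among other flags), so the child shares the parent's vmspace and only
 * the reference count is bumped below.
 */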
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared: essentially this
		 * turns memory shared amongst threads into local
		 * copy-on-write.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

void
faultin(struct proc *p)
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}
	if ((p->p_flag & P_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);

		/*
		 * We hold no lock here because the list of threads
		 * can not change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);
		PROC_LOCK(p);
		swapclear(p);
		p->p_swtick = ticks;

		wakeup(&p->p_flag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is held on entry.
 */
/* ARGSUSED */
static void
scheduler(void *dummy)
{
	struct proc *p;
	struct thread *td;
	struct proc *pp;
	int slptime;
	int swtime;
	int ppri;
	int pri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(&Giant);

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * If this thread is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	/*
	 * We would like to bring someone in, but only if there is space;
	 * the vm_page_count_min() test at the top of the loop guards
	 * against bringing a process in when memory is scarce.
	 */
	faultin(p);
	PROC_UNLOCK(p);
	goto loop;
}

void
kick_proc0(void)
{

	wakeup(&proc0);
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");
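
/*
 * Both thresholds are measured in seconds and may be tuned at runtime,
 * e.g. from userland:
 *
 *	sysctl vm.swap_idle_threshold2=30
 */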

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and swap out their stacks.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(int action)
{
	struct proc *p;
	struct thread *td;
	int didswap = 0;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;
		int slptime;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		if (p->p_state == PRS_NEW)
			continue;
		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;
		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL)
			continue;
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc;
		}
		/*
		 * Only the aio daemons change their vmspace, and they
		 * are skipped because of the P_SYSTEM check above.
		 */
		if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
			goto nextproc;

		switch (p->p_state) {
		default:
			/*
			 * Don't swap out processes in any sort
			 * of 'special' state.
			 */
			break;

		case PRS_NORMAL:
			/*
			 * Do not swap out a realtime process.
			 * Check all of its threads.
			 */
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (PRI_IS_REALTIME(td->td_pri_class)) {
					thread_unlock(td);
					goto nextproc;
				}
				slptime = (ticks - td->td_slptick) / hz;
				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (slptime < swap_idle_threshold1) {
					thread_unlock(td);
					goto nextproc;
				}

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				if (!thread_safetoswapout(td)) {
					thread_unlock(td);
					goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (slptime < swap_idle_threshold2))) {
					thread_unlock(td);
					goto nextproc;
				}

				if (minslptime > slptime)
					minslptime = slptime;
				thread_unlock(td);
			}

			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				if (swapout(p) == 0)
					didswap++;
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
		}
nextproc:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapclear(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td))
			if (setrunnable(td)) {
#ifdef INVARIANTS
				/*
				 * XXX: We just cleared TDI_SWAPPED
				 * above and set TDF_INMEM, so this
				 * should never happen.
				 */
				panic("not waking up swapper");
#endif
			}
		thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

static int
swapout(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
	    ("swapout: lost a swapout race?"));

	/*
	 * Remember the process resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	p->p_swtick = ticks;
	return (0);
}
#endif /* !NO_SWAPPING */
