/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/vmem.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/_kstack_cache.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <machine/cpu.h>

#ifndef NO_SWAPPING
static int swapout(struct proc *);
static void swapclear(struct proc *);
static void vm_thread_swapin(struct thread *td);
static void vm_thread_swapout(struct thread *td);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}
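
/*
 * Example usage (a sketch, not code from this file): a caller might
 * validate a kernel buffer before touching it, bearing in mind that only
 * the vm_map_entry protection is checked, not the resident contents:
 *
 *	if (!kernacc(ptr, len, VM_PROT_READ))
 *		return (EFAULT);
 */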

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
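
/*
 * Typical usage (a sketch, not code from this file): the sysctl code, the
 * only present user of vslock(), wires the user buffer so a handler can
 * copy out to it while holding non-sleepable locks:
 *
 *	error = vslock(uaddr, len);
 *	if (error == 0) {
 *		error = copyout(kbuf, uaddr, len);
 *		vsunlock(uaddr, len);
 *	}
 */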

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_WLOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(m);
		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
		if (rv != VM_PAGER_OK) {
			vm_page_lock(m);
			vm_page_free(m);
			vm_page_unlock(m);
			m = NULL;
			goto out;
		}
		vm_page_xunbusy(m);
	}
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_activate(m);
	vm_page_unlock(m);
out:
	VM_OBJECT_WUNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}
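
/*
 * Intended pairing (a sketch, not code from this file), as used by image
 * activators that need temporary access to a page of the executable:
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	...access the page through sf_buf_kva(sf)...
 *	vm_imgact_unmap_page(sf);
 */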

void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);

SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "");
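
/*
 * Freed kernel stacks of the default size are cached on a singly-linked
 * list protected by kstack_cache_mtx.  The list entry is overlaid on the
 * start of the unused stack memory itself, so caching a stack requires no
 * extra allocation.
 */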

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the performance of fork for a process
 * and of thread creation.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	struct kstack_cache_entry *ks_ce;
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = kstack_pages;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	if (pages == kstack_pages) {
		mtx_lock(&kstack_cache_mtx);
		if (kstack_cache != NULL) {
			ks_ce = kstack_cache;
			kstack_cache = ks_ce->next_ks_entry;
			mtx_unlock(&kstack_cache_mtx);

			td->td_kstack_obj = ks_ce->ksobj;
			td->td_kstack = (vm_offset_t)ks_ce;
			td->td_kstack_pages = kstack_pages;
			return (1);
		}
		mtx_unlock(&kstack_cache_mtx);
	}

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
#if defined(__mips__)
	/*
	 * We need to align the kstack's mapped address to fit within
	 * a single TLB entry.
	 */
	if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
	    PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_BESTFIT | M_NOWAIT, &ks)) {
		ks = 0;
	}
#else
	ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	atomic_add_int(&kstacks, 1);
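	/*
	 * Leave the first KSTACK_GUARD_PAGES of the allocation unmapped so
	 * that a stack overflow faults instead of silently corrupting
	 * adjacent memory.
	 */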
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}

static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	atomic_add_int(&kstacks, -1);
	pmap_qremove(ks, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock(m);
		vm_page_unwire(m, PQ_NONE);
		vm_page_free(m);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	struct kstack_cache_entry *ks_ce;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == kstack_pages && kstacks <= kstack_cache_size) {
		ks_ce = (struct kstack_cache_entry *)ks;
		ks_ce->ksobj = ksobj;
		mtx_lock(&kstack_cache_mtx);
		ks_ce->next_ks_entry = kstack_cache;
		kstack_cache = ks_ce;
		mtx_unlock(&kstack_cache_mtx);
		return;
	}
	vm_thread_stack_dispose(ksobj, ks, pages);
}

static void
vm_thread_stack_lowmem(void *nulll)
{
	struct kstack_cache_entry *ks_ce, *ks_ce1;

	mtx_lock(&kstack_cache_mtx);
	ks_ce = kstack_cache;
	kstack_cache = NULL;
	mtx_unlock(&kstack_cache_mtx);

	while (ks_ce != NULL) {
		ks_ce1 = ks_ce;
		ks_ce = ks_ce->next_ks_entry;

		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
		    kstack_pages);
	}
}

static void
kstack_cache_init(void *nulll)
{

	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
	    EVENTHANDLER_PRI_ANY);
}

SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifdef KSTACK_USAGE_PROF
/*
 * Track maximum stack used by a thread in kernel.
 */
static int max_kstack_used;

SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
    &max_kstack_used, 0,
    "Maximum stack depth used by a thread in kernel");
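
/*
 * The profiling code is compiled in with "options KSTACK_USAGE_PROF" and
 * the recorded maximum can be read back via the debug.max_kstack_used
 * sysctl.
 */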

void
intr_prof_stack_use(struct thread *td, struct trapframe *frame)
{
	vm_offset_t stack_top;
	vm_offset_t current;
	int used, prev_used;

	/*
	 * Testing for interrupted kernel mode isn't strictly
	 * needed. It optimizes the execution, since interrupts from
	 * usermode will have only the trap frame on the stack.
	 */
	if (TRAPF_USERMODE(frame))
		return;

	stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
	current = (vm_offset_t)(uintptr_t)&stack_top;

	/*
	 * Try to detect if interrupt is using kernel thread stack.
	 * Hardware could use a dedicated stack for interrupt handling.
	 */
	if (stack_top <= current || current < td->td_kstack)
		return;

	used = stack_top - current;
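	/*
	 * Racelessly record a new maximum: retry the compare-and-set until
	 * either this value is stored or another CPU has already published
	 * a deeper stack use.
	 */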
	for (;;) {
		prev_used = max_kstack_used;
		if (prev_used >= used)
			break;
		if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
			break;
	}
}
#endif /* KSTACK_USAGE_PROF */

#ifndef NO_SWAPPING
/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_dirty(m);
		vm_page_lock(m);
		vm_page_unwire(m, PQ_INACTIVE);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
static void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t ma[KSTACK_MAX_PAGES];
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_WLOCK(ksobj);
	for (int i = 0; i < pages; i++)
		ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL |
		    VM_ALLOC_WIRED);
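	/*
	 * Page in any stack pages that are not already valid, clustering
	 * runs of invalid pages into single pager requests where possible.
	 */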
	for (int i = 0; i < pages;) {
		int j, a, count, rv;

		vm_page_assert_xbusied(ma[i]);
		if (ma[i]->valid == VM_PAGE_BITS_ALL) {
			vm_page_xunbusy(ma[i]);
			i++;
			continue;
		}
		vm_object_pip_add(ksobj, 1);
		for (j = i + 1; j < pages; j++)
			if (ma[j]->valid == VM_PAGE_BITS_ALL)
				break;
		rv = vm_pager_has_page(ksobj, ma[i]->pindex, NULL, &a);
		KASSERT(rv == 1, ("%s: missing page %p", __func__, ma[i]));
		count = min(a + 1, j - i);
		rv = vm_pager_get_pages(ksobj, ma + i, count, NULL, NULL);
		KASSERT(rv == VM_PAGER_OK, ("%s: cannot get kstack for proc %d",
		    __func__, td->td_proc->p_pid));
		vm_object_pip_wakeup(ksobj);
		for (j = i; j < i + count; j++)
			vm_page_xunbusy(ma[j]);
		i += count;
	}
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}
#endif /* !NO_SWAPPING */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
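 *
 * For reference, the relevant rfork(2) flags: RFPROC requests creation of
 * a new process (otherwise only the calling process' address space is
 * affected), and RFMEM makes parent and child share a single vmspace
 * instead of giving the child a copy-on-write copy.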
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory: if it is shared, this changes
		 * memory shared amongst threads into COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

void
faultin(struct proc *p)
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}
	if ((p->p_flag & P_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);

		/*
		 * We hold no lock here because the list of threads
		 * can not change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);
		PROC_LOCK(p);
		swapclear(p);
		p->p_swtick = ticks;

		wakeup(&p->p_flag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
void
swapper(void)
{
	struct proc *p;
	struct thread *td;
	struct proc *pp;
	int slptime;
	int swtime;
	int ppri;
	int pri;

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW ||
		    p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
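		/*
		 * Rank candidates by time spent swapped out plus thread
		 * sleep time, biased by the nice value unless a swap-in
		 * was explicitly requested; the highest-ranked process is
		 * brought in first.
		 */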
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * if this thread is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "swapin", MAXSLP * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	/*
	 * We would like to bring someone in (only if there is space).
	 * [What checks the space?]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	goto loop;
}

void
kick_proc0(void)
{

	wakeup(&proc0);
}

#ifndef NO_SWAPPING

/*
 * swap_idle_threshold1 is the guaranteed in-memory time for a process:
 * a process is not swapped out until its threads have been sleeping for
 * at least this many seconds.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * First, if any processes have been sleeping or stopped for at least
 * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
 * no such processes exist, then the longest-sleeping or stopped
 * process is swapped out.  Finally, and only as a last resort, if
 * there are no sleeping or stopped processes, the longest-resident
 * process is swapped out.
 */
void
swapout_procs(int action)
{
	struct proc *p;
	struct thread *td;
	int didswap = 0;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;
		int slptime;

		PROC_LOCK(p);
		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		if (p->p_state == PRS_NEW) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 * Filter out exiting processes.
		 */
		if ((p->p_flag & (P_SYSTEM | P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		_PHOLD_LITE(p);
		PROC_UNLOCK(p);
		sx_sunlock(&allproc_lock);

		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL)
			goto nextproc2;
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 1 || (p->p_flag & (P_STOPPED_SINGLE |
		    P_TRACED | P_SYSTEM)) != 0)
			goto nextproc;

		/*
		 * Only the aio daemons change their vmspace, and they
		 * are skipped by the P_SYSTEM check above.
		 */
		if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
			goto nextproc;

		switch (p->p_state) {
		default:
			/* Don't swap out processes in any sort
			 * of 'special' state. */
			break;

		case PRS_NORMAL:
			/*
			 * Do not swap out a realtime process.
			 * Check all of its threads.
			 */
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (PRI_IS_REALTIME(td->td_pri_class)) {
					thread_unlock(td);
					goto nextproc;
				}
				slptime = (ticks - td->td_slptick) / hz;
				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (slptime < swap_idle_threshold1) {
					thread_unlock(td);
					goto nextproc;
				}

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				if (!thread_safetoswapout(td)) {
					thread_unlock(td);
					goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (slptime < swap_idle_threshold2))) {
					thread_unlock(td);
					goto nextproc;
				}

				if (minslptime > slptime)
					minslptime = slptime;
				thread_unlock(td);
			}

			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
				((action & VM_SWAP_IDLE) &&
				 (minslptime > swap_idle_threshold2))) {
				_PRELE(p);
				if (swapout(p) == 0)
					didswap++;
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				goto retry;
			}
		}
nextproc:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
nextproc2:
		sx_slock(&allproc_lock);
		PRELE(p);
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapclear(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td))
			if (setrunnable(td)) {
#ifdef INVARIANTS
				/*
				 * XXX: We just cleared TDI_SWAPPED
				 * above and set TDF_INMEM, so this
				 * should never happen.
				 */
				panic("not waking up swapper");
#endif
			}
		thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

static int
swapout(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
		("swapout: lost a swapout race?"));

	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	p->p_swtick = ticks;
	return (0);
}
#endif /* !NO_SWAPPING */
