/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 217192 2011-01-09 12:50:44Z kib $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

/*
 * System initialization
 *
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL);

#ifndef NO_SWAPPING
static int swapout(struct proc *);
static void swapclear(struct proc *);
static void vm_thread_swapin(struct thread *td);
static void vm_thread_swapout(struct thread *td);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}
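
/*
 * Illustrative sketch (assumed caller, not taken from this file): a
 * typical user validates a buffer's map permissions before touching it:
 *
 *	if (!useracc(uaddr, len, VM_PROT_READ))
 *		return (EFAULT);
 *
 * Note that only the vm_map_entry protections are checked; the access
 * itself must still go through copyin()/copyout() or a wired mapping.
 */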

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	PROC_LOCK(curproc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
	    lim_cur(curproc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(curproc);
		return (ENOMEM);
	}
	PROC_UNLOCK(curproc);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
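
/*
 * Illustrative sketch of the expected pairing (assumed caller; per the
 * comment in vslock() above, the sysctl code is the only present user):
 *
 *	error = vslock(uaddr, len);
 *	if (error != 0)
 *		return (error);
 *	... access the now-wired user pages ...
 *	vsunlock(uaddr, len);
 */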

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m, ma[1];
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_LOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		ma[0] = m;
		rv = vm_pager_get_pages(object, ma, 1, 0);
		m = vm_page_lookup(object, pindex);
		if (m == NULL)
			goto out;
		if (rv != VM_PAGER_OK) {
			vm_page_lock(m);
			vm_page_free(m);
			vm_page_unlock(m);
			m = NULL;
			goto out;
		}
	}
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_unlock(m);
	vm_page_wakeup(m);
out:
	VM_OBJECT_UNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}
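
/*
 * Illustrative sketch of the expected pairing (assumed caller, e.g. an
 * image activator copying into an executable's pages):
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	bcopy(src, (char *)sf_buf_kva(sf) + off, len);
 *	vm_imgact_unmap_page(sf);
 *
 * Because vm_imgact_map_page() pins the current CPU via sched_pin(), the
 * mapping must not be held across a sleep.
 */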

/*
 * Synchronize the instruction cache with recently written memory by
 * handing the range off to the pmap layer.
 */
void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

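/*
 * Kernel stack cache.  A freed default-sized stack stays mapped and is
 * reused for the next thread instead of being released; the cache entry
 * itself lives in the first bytes of the free stack, so no separate
 * allocation is needed (see the casts in vm_thread_new() and
 * vm_thread_dispose() below).
 */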
struct kstack_cache_entry {
	vm_object_t ksobj;
	struct kstack_cache_entry *next_ks_entry;
};

static struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "Maximum number of kernel stacks cached for reuse");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "Current number of allocated kernel stacks");

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the performance of fork for a process
 * and of thread creation.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	struct kstack_cache_entry *ks_ce;
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	if (pages == KSTACK_PAGES) {
		mtx_lock(&kstack_cache_mtx);
		if (kstack_cache != NULL) {
			ks_ce = kstack_cache;
			kstack_cache = ks_ce->next_ks_entry;
			mtx_unlock(&kstack_cache_mtx);

			td->td_kstack_obj = ks_ce->ksobj;
			td->td_kstack = (vm_offset_t)ks_ce;
			td->td_kstack_pages = KSTACK_PAGES;
			return (1);
		}
		mtx_unlock(&kstack_cache_mtx);
	}

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
#if defined(__mips__)
	/*
	 * We need to align the kstack's mapped address to fit within
	 * a single TLB entry.
	 */
	ks = kmem_alloc_nofault_space(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, VMFS_TLB_ALIGNED_SPACE);
#else
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	atomic_add_int(&kstacks, 1);
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}

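/*
 * Release a kernel stack: unwire and free the backing pages, drop the
 * object reference, and return the mapping, including the guard pages
 * below the stack, to the kernel map.
 */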
static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	atomic_add_int(&kstacks, -1);
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock(m);
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	struct kstack_cache_entry *ks_ce;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
		ks_ce = (struct kstack_cache_entry *)ks;
		ks_ce->ksobj = ksobj;
		mtx_lock(&kstack_cache_mtx);
		ks_ce->next_ks_entry = kstack_cache;
		kstack_cache = ks_ce;
		mtx_unlock(&kstack_cache_mtx);
		return;
	}
	vm_thread_stack_dispose(ksobj, ks, pages);
}

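/*
 * vm_lowmem event handler: discard every cached kernel stack so that the
 * backing pages can be reclaimed.
 */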
static void
vm_thread_stack_lowmem(void *nulll)
{
	struct kstack_cache_entry *ks_ce, *ks_ce1;

	mtx_lock(&kstack_cache_mtx);
	ks_ce = kstack_cache;
	kstack_cache = NULL;
	mtx_unlock(&kstack_cache_mtx);

	while (ks_ce != NULL) {
		ks_ce1 = ks_ce;
		ks_ce = ks_ce->next_ks_entry;

		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
		    KSTACK_PAGES);
	}
}

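/*
 * Register the low-memory handler for the kernel stack cache at boot.
 */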
static void
kstack_cache_init(void *nulll)
{

	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
	    EVENTHANDLER_PRI_ANY);
}

MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifndef NO_SWAPPING
/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_dirty(m);
		vm_page_lock(m);
		vm_page_unwire(m, 0);
		vm_page_unlock(m);
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
static void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t ma[KSTACK_MAX_PAGES];
	int i, j, k, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++)
		ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL |
		    VM_ALLOC_RETRY | VM_ALLOC_WIRED);
	for (i = 0; i < pages; i++) {
		if (ma[i]->valid != VM_PAGE_BITS_ALL) {
			KASSERT(ma[i]->oflags & VPO_BUSY,
			    ("lost busy 1"));
			vm_object_pip_add(ksobj, 1);
			for (j = i + 1; j < pages; j++) {
				KASSERT(ma[j]->valid == VM_PAGE_BITS_ALL ||
				    (ma[j]->oflags & VPO_BUSY),
				    ("lost busy 2"));
				if (ma[j]->valid == VM_PAGE_BITS_ALL)
					break;
			}
			rv = vm_pager_get_pages(ksobj, ma + i, j - i, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			vm_object_pip_wakeup(ksobj);
			for (k = i; k < j; k++)
				ma[k] = vm_page_lookup(ksobj, k);
			vm_page_wakeup(ma[i]);
		} else if (ma[i]->oflags & VPO_BUSY)
			vm_page_wakeup(ma[i]);
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}
#endif /* !NO_SWAPPING */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially this
		 * changes memory shared amongst threads into COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);	/* and clean-out the vmspace */
}

void
faultin(struct proc *p)
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}
	if ((p->p_flag & P_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);

		/*
		 * We hold no lock here because the list of threads
		 * cannot change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);
		PROC_LOCK(p);
		swapclear(p);
		p->p_swtick = ticks;

		wakeup(&p->p_flag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is held on entry.
 */
/* ARGSUSED*/
static void
scheduler(void *dummy)
{
	struct proc *p;
	struct thread *td;
	struct proc *pp;
	int slptime;
	int swtime;
	int ppri;
	int pri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(&Giant);

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * If this thread is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", MAXSLP * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	/*
	 * We would like to bring someone in (only if there is space).
	 * [What checks the space?]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	goto loop;
}

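/*
 * Wake the swapper; scheduler() above sleeps on &proc0 when it finds
 * nothing to swap in.
 */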
void
kick_proc0(void)
{

	wakeup(&proc0);
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * First, if any processes have been sleeping or stopped for at least
 * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
 * no such processes exist, then the longest-sleeping or stopped
 * process is swapped out.  Finally, and only as a last resort, if
 * there are no sleeping or stopped processes, the longest-resident
 * process is swapped out.
 */
void
swapout_procs(int action)
{
	struct proc *p;
	struct thread *td;
	int didswap = 0;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;
		int slptime;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		if (p->p_state == PRS_NEW)
			continue;
		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;
		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL)
			continue;
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE | P_TRACED | P_SYSTEM |
		    P_WEXIT)) != 0) {
			goto nextproc;
		}
		/*
		 * Only aiod changes vmspace; however, it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM.
		 */
		if ((p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) !=
		    P_INMEM)
			goto nextproc;

		switch (p->p_state) {
		default:
			/*
			 * Don't swap out processes in any sort
			 * of 'special' state.
			 */
			break;

		case PRS_NORMAL:
			/*
			 * Do not swapout a realtime process.
			 * Check all the thread groups..
			 */
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (PRI_IS_REALTIME(td->td_pri_class)) {
					thread_unlock(td);
					goto nextproc;
				}
				slptime = (ticks - td->td_slptick) / hz;
				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (slptime < swap_idle_threshold1) {
					thread_unlock(td);
					goto nextproc;
				}

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				if (!thread_safetoswapout(td)) {
					thread_unlock(td);
					goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (slptime < swap_idle_threshold2))) {
					thread_unlock(td);
					goto nextproc;
				}

				if (minslptime > slptime)
					minslptime = slptime;
				thread_unlock(td);
			}

			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				if (swapout(p) == 0)
					didswap++;
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
		}
nextproc:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

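/*
 * Mark a process resident again after swapin: set TDF_INMEM on each
 * thread, clear the swap-related inhibitors, and return runnable threads
 * to the run queue.
 */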
static void
swapclear(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td))
			if (setrunnable(td)) {
#ifdef INVARIANTS
				/*
				 * XXX: We just cleared TDI_SWAPPED
				 * above and set TDF_INMEM, so this
				 * should never happen.
				 */
				panic("not waking up swapper");
#endif
			}
		thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN | P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

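/*
 * Swap out a single process: clear P_INMEM, mark every thread swapped
 * out, and page out each thread's kernel stack.  Returns 0 on success or
 * EBUSY if some thread became unsafe to swap out in the meantime.
 */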
static int
swapout(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) ==
	    P_INMEM, ("swapout: lost a swapout race?"));

	/*
	 * Remember the process resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	p->p_swtick = ticks;
	return (0);
}
#endif /* !NO_SWAPPING */