1/*-
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * from: @(#)vm_glue.c 8.6 (Berkeley) 1/5/94
33 *
34 *
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36 * All rights reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and
39 * its documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
46 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie the
56 * rights to redistribute these changes.
57 */
58
59#include <sys/cdefs.h>
60__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 217192 2011-01-09 12:50:44Z kib $");
60__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 220373 2011-04-05 20:23:59Z trasz $");
61
62#include "opt_vm.h"
63#include "opt_kstack_pages.h"
64#include "opt_kstack_max_pages.h"
65
66#include <sys/param.h>
67#include <sys/systm.h>
68#include <sys/limits.h>
69#include <sys/lock.h>
70#include <sys/mutex.h>
71#include <sys/proc.h>
72#include <sys/racct.h>
73#include <sys/resourcevar.h>
74#include <sys/sched.h>
75#include <sys/sf_buf.h>
76#include <sys/shm.h>
77#include <sys/vmmeter.h>
78#include <sys/sx.h>
79#include <sys/sysctl.h>
80
81#include <sys/eventhandler.h>
82#include <sys/kernel.h>
83#include <sys/ktr.h>
84#include <sys/unistd.h>
85
86#include <vm/vm.h>
87#include <vm/vm_param.h>
88#include <vm/pmap.h>
89#include <vm/vm_map.h>
90#include <vm/vm_page.h>
91#include <vm/vm_pageout.h>
92#include <vm/vm_object.h>
93#include <vm/vm_kern.h>
94#include <vm/vm_extern.h>
95#include <vm/vm_pager.h>
96#include <vm/swap_pager.h>
97
98/*
99 * System initialization
100 *
101 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
102 *
103 * Note: run scheduling should be divorced from the vm system.
104 */
105static void scheduler(void *);
106SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL);
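/*
 * SI_SUB_RUN_SCHEDULER with SI_ORDER_ANY places this SYSINIT at the very
 * end of kernel initialization.  scheduler() below never returns; it
 * becomes the process swap-in loop, running in the context of the thread
 * that performed initialization (proc0).
 */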
107
108#ifndef NO_SWAPPING
109static int swapout(struct proc *);
110static void swapclear(struct proc *);
111static void vm_thread_swapin(struct thread *td);
112static void vm_thread_swapout(struct thread *td);
113#endif
114
115/*
116 * MPSAFE
117 *
118 * WARNING! This code calls vm_map_check_protection() which only checks
119 * the associated vm_map_entry range. It does not determine whether the
 120 * contents of the memory are actually readable or writable. In most cases
121 * just checking the vm_map_entry is sufficient within the kernel's address
122 * space.
123 */
124int
125kernacc(addr, len, rw)
126 void *addr;
127 int len, rw;
128{
129 boolean_t rv;
130 vm_offset_t saddr, eaddr;
131 vm_prot_t prot;
132
133 KASSERT((rw & ~VM_PROT_ALL) == 0,
134 ("illegal ``rw'' argument to kernacc (%x)\n", rw));
135
136 if ((vm_offset_t)addr + len > kernel_map->max_offset ||
137 (vm_offset_t)addr + len < (vm_offset_t)addr)
138 return (FALSE);
139
140 prot = rw;
141 saddr = trunc_page((vm_offset_t)addr);
142 eaddr = round_page((vm_offset_t)addr + len);
143 vm_map_lock_read(kernel_map);
144 rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
145 vm_map_unlock_read(kernel_map);
146 return (rv == TRUE);
147}
148
149/*
150 * MPSAFE
151 *
152 * WARNING! This code calls vm_map_check_protection() which only checks
153 * the associated vm_map_entry range. It does not determine whether the
 154 * contents of the memory are actually readable or writable. vmapbuf(),
 155 * vm_fault_quick(), or the copyin()/copyout()/su*()/fu*() functions should be
 156 * used in conjunction with this call.
157 */
158int
159useracc(addr, len, rw)
160 void *addr;
161 int len, rw;
162{
163 boolean_t rv;
164 vm_prot_t prot;
165 vm_map_t map;
166
167 KASSERT((rw & ~VM_PROT_ALL) == 0,
168 ("illegal ``rw'' argument to useracc (%x)\n", rw));
169 prot = rw;
170 map = &curproc->p_vmspace->vm_map;
171 if ((vm_offset_t)addr + len > vm_map_max(map) ||
172 (vm_offset_t)addr + len < (vm_offset_t)addr) {
173 return (FALSE);
174 }
175 vm_map_lock_read(map);
176 rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
177 round_page((vm_offset_t)addr + len), prot);
178 vm_map_unlock_read(map);
179 return (rv == TRUE);
180}
181
182int
183vslock(void *addr, size_t len)
184{
185 vm_offset_t end, last, start;
186 unsigned long nsize;
187 vm_size_t npages;
188 int error;
189
190 last = (vm_offset_t)addr + len;
191 start = trunc_page((vm_offset_t)addr);
192 end = round_page(last);
193 if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
194 return (EINVAL);
195 npages = atop(end - start);
196 if (npages > vm_page_max_wired)
197 return (ENOMEM);
198 PROC_LOCK(curproc);
199 nsize = ptoa(npages +
200 pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map)));
201 if (nsize > lim_cur(curproc, RLIMIT_MEMLOCK)) {
202 PROC_UNLOCK(curproc);
203 return (ENOMEM);
204 }
205 if (racct_set(curproc, RACCT_MEMLOCK, nsize)) {
206 PROC_UNLOCK(curproc);
207 return (ENOMEM);
208 }
209 PROC_UNLOCK(curproc);
210#if 0
211 /*
212 * XXX - not yet
213 *
214 * The limit for transient usage of wired pages should be
215 * larger than for "permanent" wired pages (mlock()).
216 *
217 * Also, the sysctl code, which is the only present user
218 * of vslock(), does a hard loop on EAGAIN.
219 */
220 if (npages + cnt.v_wire_count > vm_page_max_wired)
221 return (EAGAIN);
222#endif
223 error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
224 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
225 if (error != KERN_SUCCESS) {
226 PROC_LOCK(curproc);
227 racct_set(curproc, RACCT_MEMLOCK,
228 ptoa(pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))));
229 PROC_UNLOCK(curproc);
230 }
231 /*
232 * Return EFAULT on error to match copy{in,out}() behaviour
233 * rather than returning ENOMEM like mlock() would.
234 */
235 return (error == KERN_SUCCESS ? 0 : EFAULT);
236}
237
238void
239vsunlock(void *addr, size_t len)
240{
241
242 /* Rely on the parameter sanity checks performed by vslock(). */
243 (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
244 trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
245 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
246
247 PROC_LOCK(curproc);
248 racct_set(curproc, RACCT_MEMLOCK,
249 ptoa(pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))));
250 PROC_UNLOCK(curproc);
251}
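/*
 * Usage sketch for vslock()/vsunlock(); the identifiers "uaddr", "len" and
 * "kbuf" are illustrative, not taken from a real caller.  The sysctl
 * old-buffer wiring code (per the comment in vslock(), its only present
 * user) follows a wire/copy/unwire pattern along these lines:
 *
 *	if (vslock(uaddr, len) == 0) {
 *		error = copyout(kbuf, uaddr, len);
 *		vsunlock(uaddr, len);
 *	}
 */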
252
253/*
254 * Pin the page contained within the given object at the given offset. If the
255 * page is not resident, allocate and load it using the given object's pager.
256 * Return the pinned page if successful; otherwise, return NULL.
257 */
258static vm_page_t
259vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
260{
261 vm_page_t m, ma[1];
262 vm_pindex_t pindex;
263 int rv;
264
265 VM_OBJECT_LOCK(object);
266 pindex = OFF_TO_IDX(offset);
267 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
268 if (m->valid != VM_PAGE_BITS_ALL) {
269 ma[0] = m;
270 rv = vm_pager_get_pages(object, ma, 1, 0);
271 m = vm_page_lookup(object, pindex);
272 if (m == NULL)
273 goto out;
274 if (rv != VM_PAGER_OK) {
275 vm_page_lock(m);
276 vm_page_free(m);
277 vm_page_unlock(m);
278 m = NULL;
279 goto out;
280 }
281 }
282 vm_page_lock(m);
283 vm_page_hold(m);
284 vm_page_unlock(m);
285 vm_page_wakeup(m);
286out:
287 VM_OBJECT_UNLOCK(object);
288 return (m);
289}
290
291/*
292 * Return a CPU private mapping to the page at the given offset within the
293 * given object. The page is pinned before it is mapped.
294 */
295struct sf_buf *
296vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
297{
298 vm_page_t m;
299
300 m = vm_imgact_hold_page(object, offset);
301 if (m == NULL)
302 return (NULL);
303 sched_pin();
304 return (sf_buf_alloc(m, SFB_CPUPRIVATE));
305}
306
307/*
308 * Destroy the given CPU private mapping and unpin the page that it mapped.
309 */
310void
311vm_imgact_unmap_page(struct sf_buf *sf)
312{
313 vm_page_t m;
314
315 m = sf_buf_page(sf);
316 sf_buf_free(sf);
317 sched_unpin();
318 vm_page_lock(m);
319 vm_page_unhold(m);
320 vm_page_unlock(m);
321}
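/*
 * Usage sketch for the two routines above (names are illustrative, not
 * from a specific image activator): map a page of the executable, copy
 * from the CPU private mapping, then release it.
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf != NULL) {
 *		bcopy((void *)sf_buf_kva(sf), dst, PAGE_SIZE);
 *		vm_imgact_unmap_page(sf);
 *	}
 */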
322
323void
324vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
325{
326
327 pmap_sync_icache(map->pmap, va, sz);
328}
329
330struct kstack_cache_entry {
331 vm_object_t ksobj;
332 struct kstack_cache_entry *next_ks_entry;
333};
334
335static struct kstack_cache_entry *kstack_cache;
336static int kstack_cache_size = 128;
337static int kstacks;
338static struct mtx kstack_cache_mtx;
339SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
340 "");
341SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
342 "");
343
344#ifndef KSTACK_MAX_PAGES
345#define KSTACK_MAX_PAGES 32
346#endif
347
348/*
349 * Create the kernel stack (including pcb for i386) for a new thread.
 350 * This routine directly affects the fork performance of a process and
 351 * the creation performance of a thread.
352 */
353int
354vm_thread_new(struct thread *td, int pages)
355{
356 vm_object_t ksobj;
357 vm_offset_t ks;
358 vm_page_t m, ma[KSTACK_MAX_PAGES];
359 struct kstack_cache_entry *ks_ce;
360 int i;
361
362 /* Bounds check */
363 if (pages <= 1)
364 pages = KSTACK_PAGES;
365 else if (pages > KSTACK_MAX_PAGES)
366 pages = KSTACK_MAX_PAGES;
367
368 if (pages == KSTACK_PAGES) {
369 mtx_lock(&kstack_cache_mtx);
370 if (kstack_cache != NULL) {
371 ks_ce = kstack_cache;
372 kstack_cache = ks_ce->next_ks_entry;
373 mtx_unlock(&kstack_cache_mtx);
374
375 td->td_kstack_obj = ks_ce->ksobj;
376 td->td_kstack = (vm_offset_t)ks_ce;
377 td->td_kstack_pages = KSTACK_PAGES;
378 return (1);
379 }
380 mtx_unlock(&kstack_cache_mtx);
381 }
382
383 /*
384 * Allocate an object for the kstack.
385 */
386 ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
387
388 /*
389 * Get a kernel virtual address for this thread's kstack.
390 */
391#if defined(__mips__)
392 /*
393 * We need to align the kstack's mapped address to fit within
394 * a single TLB entry.
395 */
396 ks = kmem_alloc_nofault_space(kernel_map,
397 (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, VMFS_TLB_ALIGNED_SPACE);
398#else
399 ks = kmem_alloc_nofault(kernel_map,
400 (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
401#endif
402 if (ks == 0) {
403 printf("vm_thread_new: kstack allocation failed\n");
404 vm_object_deallocate(ksobj);
405 return (0);
406 }
407
408 atomic_add_int(&kstacks, 1);
409 if (KSTACK_GUARD_PAGES != 0) {
410 pmap_qremove(ks, KSTACK_GUARD_PAGES);
411 ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
412 }
413 td->td_kstack_obj = ksobj;
414 td->td_kstack = ks;
415 /*
416 * Knowing the number of pages allocated is useful when you
417 * want to deallocate them.
418 */
419 td->td_kstack_pages = pages;
420 /*
421 * For the length of the stack, link in a real page of ram for each
422 * page of stack.
423 */
424 VM_OBJECT_LOCK(ksobj);
425 for (i = 0; i < pages; i++) {
426 /*
427 * Get a kernel stack page.
428 */
429 m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
430 VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
431 ma[i] = m;
432 m->valid = VM_PAGE_BITS_ALL;
433 }
434 VM_OBJECT_UNLOCK(ksobj);
435 pmap_qenter(ks, ma, pages);
436 return (1);
437}
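/*
 * Layout note for vm_thread_new(): the KVA window spans
 * (pages + KSTACK_GUARD_PAGES) pages.  The guard pages sit below the
 * address stored in td_kstack and are left unmapped (pmap_qremove()),
 * so a downward-growing stack that overflows faults in the guard region
 * instead of silently overwriting adjacent kernel memory.  The function
 * returns 1 on success and 0 on failure.
 */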
438
439static void
440vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
441{
442 vm_page_t m;
443 int i;
444
445 atomic_add_int(&kstacks, -1);
446 pmap_qremove(ks, pages);
447 VM_OBJECT_LOCK(ksobj);
448 for (i = 0; i < pages; i++) {
449 m = vm_page_lookup(ksobj, i);
450 if (m == NULL)
451 panic("vm_thread_dispose: kstack already missing?");
452 vm_page_lock(m);
453 vm_page_unwire(m, 0);
454 vm_page_free(m);
455 vm_page_unlock(m);
456 }
457 VM_OBJECT_UNLOCK(ksobj);
458 vm_object_deallocate(ksobj);
459 kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
460 (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
461}
462
463/*
464 * Dispose of a thread's kernel stack.
465 */
466void
467vm_thread_dispose(struct thread *td)
468{
469 vm_object_t ksobj;
470 vm_offset_t ks;
471 struct kstack_cache_entry *ks_ce;
472 int pages;
473
474 pages = td->td_kstack_pages;
475 ksobj = td->td_kstack_obj;
476 ks = td->td_kstack;
477 td->td_kstack = 0;
478 td->td_kstack_pages = 0;
479 if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
480 ks_ce = (struct kstack_cache_entry *)ks;
481 ks_ce->ksobj = ksobj;
482 mtx_lock(&kstack_cache_mtx);
483 ks_ce->next_ks_entry = kstack_cache;
484 kstack_cache = ks_ce;
485 mtx_unlock(&kstack_cache_mtx);
486 return;
487 }
488 vm_thread_stack_dispose(ksobj, ks, pages);
489}
490
491static void
492vm_thread_stack_lowmem(void *nulll)
493{
494 struct kstack_cache_entry *ks_ce, *ks_ce1;
495
496 mtx_lock(&kstack_cache_mtx);
497 ks_ce = kstack_cache;
498 kstack_cache = NULL;
499 mtx_unlock(&kstack_cache_mtx);
500
501 while (ks_ce != NULL) {
502 ks_ce1 = ks_ce;
503 ks_ce = ks_ce->next_ks_entry;
504
505 vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
506 KSTACK_PAGES);
507 }
508}
509
510static void
511kstack_cache_init(void *nulll)
512{
513
514 EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
515 EVENTHANDLER_PRI_ANY);
516}
517
518MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
519SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);
520
521#ifndef NO_SWAPPING
522/*
523 * Allow a thread's kernel stack to be paged out.
524 */
525static void
526vm_thread_swapout(struct thread *td)
527{
528 vm_object_t ksobj;
529 vm_page_t m;
530 int i, pages;
531
532 cpu_thread_swapout(td);
533 pages = td->td_kstack_pages;
534 ksobj = td->td_kstack_obj;
535 pmap_qremove(td->td_kstack, pages);
536 VM_OBJECT_LOCK(ksobj);
537 for (i = 0; i < pages; i++) {
538 m = vm_page_lookup(ksobj, i);
539 if (m == NULL)
540 panic("vm_thread_swapout: kstack already missing?");
541 vm_page_dirty(m);
542 vm_page_lock(m);
543 vm_page_unwire(m, 0);
544 vm_page_unlock(m);
545 }
546 VM_OBJECT_UNLOCK(ksobj);
547}
548
549/*
550 * Bring the kernel stack for a specified thread back in.
551 */
552static void
553vm_thread_swapin(struct thread *td)
554{
555 vm_object_t ksobj;
556 vm_page_t ma[KSTACK_MAX_PAGES];
557 int i, j, k, pages, rv;
558
559 pages = td->td_kstack_pages;
560 ksobj = td->td_kstack_obj;
561 VM_OBJECT_LOCK(ksobj);
562 for (i = 0; i < pages; i++)
563 ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
564 VM_ALLOC_WIRED);
565 for (i = 0; i < pages; i++) {
566 if (ma[i]->valid != VM_PAGE_BITS_ALL) {
567 KASSERT(ma[i]->oflags & VPO_BUSY,
568 ("lost busy 1"));
569 vm_object_pip_add(ksobj, 1);
570 for (j = i + 1; j < pages; j++) {
571 KASSERT(ma[j]->valid == VM_PAGE_BITS_ALL ||
572 (ma[j]->oflags & VPO_BUSY),
573 ("lost busy 2"));
574 if (ma[j]->valid == VM_PAGE_BITS_ALL)
575 break;
576 }
577 rv = vm_pager_get_pages(ksobj, ma + i, j - i, 0);
578 if (rv != VM_PAGER_OK)
579 panic("vm_thread_swapin: cannot get kstack for proc: %d",
580 td->td_proc->p_pid);
581 vm_object_pip_wakeup(ksobj);
582 for (k = i; k < j; k++)
583 ma[k] = vm_page_lookup(ksobj, k);
584 vm_page_wakeup(ma[i]);
585 } else if (ma[i]->oflags & VPO_BUSY)
586 vm_page_wakeup(ma[i]);
587 }
588 VM_OBJECT_UNLOCK(ksobj);
589 pmap_qenter(td->td_kstack, ma, pages);
590 cpu_thread_swapin(td);
591}
592#endif /* !NO_SWAPPING */
593
594/*
595 * Implement fork's actions on an address space.
596 * Here we arrange for the address space to be copied or referenced,
597 * allocate a user struct (pcb and kernel stack), then call the
598 * machine-dependent layer to fill those in and make the new process
599 * ready to run. The new process is set up so that it returns directly
600 * to user mode to avoid stack copying and relocation problems.
601 */
602int
603vm_forkproc(td, p2, td2, vm2, flags)
604 struct thread *td;
605 struct proc *p2;
606 struct thread *td2;
607 struct vmspace *vm2;
608 int flags;
609{
610 struct proc *p1 = td->td_proc;
611 int error;
612
613 if ((flags & RFPROC) == 0) {
614 /*
 615 * Divorce the memory if it is shared; essentially this
 616 * turns memory shared amongst threads into local
 617 * copy-on-write (COW).
618 */
619 if ((flags & RFMEM) == 0) {
620 if (p1->p_vmspace->vm_refcnt > 1) {
621 error = vmspace_unshare(p1);
622 if (error)
623 return (error);
624 }
625 }
626 cpu_fork(td, p2, td2, flags);
627 return (0);
628 }
629
630 if (flags & RFMEM) {
631 p2->p_vmspace = p1->p_vmspace;
632 atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
633 }
634
635 while (vm_page_count_severe()) {
636 VM_WAIT;
637 }
638
639 if ((flags & RFMEM) == 0) {
640 p2->p_vmspace = vm2;
641 if (p1->p_vmspace->vm_shm)
642 shmfork(p1, p2);
643 }
644
645 /*
646 * cpu_fork will copy and update the pcb, set up the kernel stack,
647 * and make the child ready to run.
648 */
649 cpu_fork(td, p2, td2, flags);
650 return (0);
651}
652
653/*
 654 * Called after a process has been wait(2)'ed upon and is being reaped.
655 * The idea is to reclaim resources that we could not reclaim while
656 * the process was still executing.
657 */
658void
659vm_waitproc(p)
660 struct proc *p;
661{
662
663 vmspace_exitfree(p); /* and clean-out the vmspace */
664}
665
666void
667faultin(p)
668 struct proc *p;
669{
670#ifdef NO_SWAPPING
671
672 PROC_LOCK_ASSERT(p, MA_OWNED);
673 if ((p->p_flag & P_INMEM) == 0)
674 panic("faultin: proc swapped out with NO_SWAPPING!");
675#else /* !NO_SWAPPING */
676 struct thread *td;
677
678 PROC_LOCK_ASSERT(p, MA_OWNED);
679 /*
680 * If another process is swapping in this process,
681 * just wait until it finishes.
682 */
683 if (p->p_flag & P_SWAPPINGIN) {
684 while (p->p_flag & P_SWAPPINGIN)
685 msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
686 return;
687 }
688 if ((p->p_flag & P_INMEM) == 0) {
689 /*
690 * Don't let another thread swap process p out while we are
691 * busy swapping it in.
692 */
693 ++p->p_lock;
694 p->p_flag |= P_SWAPPINGIN;
695 PROC_UNLOCK(p);
696
697 /*
698 * We hold no lock here because the list of threads
699 * can not change while all threads in the process are
700 * swapped out.
701 */
702 FOREACH_THREAD_IN_PROC(p, td)
703 vm_thread_swapin(td);
704 PROC_LOCK(p);
705 swapclear(p);
706 p->p_swtick = ticks;
707
708 wakeup(&p->p_flag);
709
710 /* Allow other threads to swap p out now. */
711 --p->p_lock;
712 }
713#endif /* NO_SWAPPING */
714}
715
716/*
717 * This swapin algorithm attempts to swap-in processes only if there
718 * is enough space for them. Of course, if a process waits for a long
719 * time, it will be swapped in anyway.
720 *
721 * Giant is held on entry.
722 */
723/* ARGSUSED*/
724static void
725scheduler(dummy)
726 void *dummy;
727{
728 struct proc *p;
729 struct thread *td;
730 struct proc *pp;
731 int slptime;
732 int swtime;
733 int ppri;
734 int pri;
735
736 mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
737 mtx_unlock(&Giant);
738
739loop:
740 if (vm_page_count_min()) {
741 VM_WAIT;
742 goto loop;
743 }
744
745 pp = NULL;
746 ppri = INT_MIN;
747 sx_slock(&allproc_lock);
748 FOREACH_PROC_IN_SYSTEM(p) {
749 PROC_LOCK(p);
750 if (p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
751 PROC_UNLOCK(p);
752 continue;
753 }
754 swtime = (ticks - p->p_swtick) / hz;
755 FOREACH_THREAD_IN_PROC(p, td) {
756 /*
757 * An otherwise runnable thread of a process
758 * swapped out has only the TDI_SWAPPED bit set.
759 *
760 */
761 thread_lock(td);
762 if (td->td_inhibitors == TDI_SWAPPED) {
763 slptime = (ticks - td->td_slptick) / hz;
764 pri = swtime + slptime;
765 if ((td->td_flags & TDF_SWAPINREQ) == 0)
766 pri -= p->p_nice * 8;
767 /*
768 * if this thread is higher priority
769 * and there is enough space, then select
770 * this process instead of the previous
771 * selection.
772 */
773 if (pri > ppri) {
774 pp = p;
775 ppri = pri;
776 }
777 }
778 thread_unlock(td);
779 }
780 PROC_UNLOCK(p);
781 }
782 sx_sunlock(&allproc_lock);
783
784 /*
785 * Nothing to do, back to sleep.
786 */
787 if ((p = pp) == NULL) {
788 tsleep(&proc0, PVM, "sched", MAXSLP * hz / 2);
789 goto loop;
790 }
791 PROC_LOCK(p);
792
793 /*
794 * Another process may be bringing or may have already
795 * brought this process in while we traverse all threads.
796 * Or, this process may even be being swapped out again.
797 */
798 if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
799 PROC_UNLOCK(p);
800 goto loop;
801 }
802
803 /*
 804 * We would like to bring someone in (only if there is space).
 805 * [XXX: what checks the space?]
806 */
807 faultin(p);
808 PROC_UNLOCK(p);
809 goto loop;
810}
811
812void
813kick_proc0(void)
814{
815
816 wakeup(&proc0);
817}
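/*
 * kick_proc0() wakes the swap-in loop above, which sleeps on &proc0
 * (the "sched" wait channel) when it has nothing to bring in; the same
 * wakeup is issued from swapout_procs() below once something has been
 * swapped out.
 */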
818
819#ifndef NO_SWAPPING
820
821/*
822 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
823 */
824static int swap_idle_threshold1 = 2;
825SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
826 &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");
827
828/*
829 * Swap_idle_threshold2 is the time that a process can be idle before
830 * it will be swapped out, if idle swapping is enabled.
831 */
832static int swap_idle_threshold2 = 10;
833SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
834 &swap_idle_threshold2, 0, "Time before a process will be swapped out");
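/*
 * Both thresholds are run-time tunables; values are in seconds and the
 * numbers below are illustrative only:
 *
 *	sysctl vm.swap_idle_threshold1=5
 *	sysctl vm.swap_idle_threshold2=30
 */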
835
836/*
837 * First, if any processes have been sleeping or stopped for at least
838 * "swap_idle_threshold1" seconds, they are swapped out. If, however,
839 * no such processes exist, then the longest-sleeping or stopped
840 * process is swapped out. Finally, and only as a last resort, if
841 * there are no sleeping or stopped processes, the longest-resident
842 * process is swapped out.
843 */
844void
845swapout_procs(action)
846int action;
847{
848 struct proc *p;
849 struct thread *td;
850 int didswap = 0;
851
852retry:
853 sx_slock(&allproc_lock);
854 FOREACH_PROC_IN_SYSTEM(p) {
855 struct vmspace *vm;
856 int minslptime = 100000;
857 int slptime;
858
859 /*
860 * Watch out for a process in
861 * creation. It may have no
862 * address space or lock yet.
863 */
864 if (p->p_state == PRS_NEW)
865 continue;
866 /*
867 * An aio daemon switches its
868 * address space while running.
869 * Perform a quick check whether
870 * a process has P_SYSTEM.
871 */
872 if ((p->p_flag & P_SYSTEM) != 0)
873 continue;
874 /*
875 * Do not swapout a process that
876 * is waiting for VM data
877 * structures as there is a possible
878 * deadlock. Test this first as
879 * this may block.
880 *
881 * Lock the map until swapout
882 * finishes, or a thread of this
883 * process may attempt to alter
884 * the map.
885 */
886 vm = vmspace_acquire_ref(p);
887 if (vm == NULL)
888 continue;
889 if (!vm_map_trylock(&vm->vm_map))
890 goto nextproc1;
891
892 PROC_LOCK(p);
893 if (p->p_lock != 0 ||
894 (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
895 ) != 0) {
896 goto nextproc;
897 }
898 /*
 899 * Only the aio daemon (aiod) changes its vmspace; it is
 900 * skipped here anyway because of the P_SYSTEM check
 901 * above.
902 */
903 if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
904 goto nextproc;
905
906 switch (p->p_state) {
907 default:
908 /* Don't swap out processes in any sort
909 * of 'special' state. */
910 break;
911
912 case PRS_NORMAL:
913 /*
914 * do not swapout a realtime process
915 * Check all the thread groups..
916 */
917 FOREACH_THREAD_IN_PROC(p, td) {
918 thread_lock(td);
919 if (PRI_IS_REALTIME(td->td_pri_class)) {
920 thread_unlock(td);
921 goto nextproc;
922 }
923 slptime = (ticks - td->td_slptick) / hz;
924 /*
925 * Guarantee swap_idle_threshold1
926 * time in memory.
927 */
928 if (slptime < swap_idle_threshold1) {
929 thread_unlock(td);
930 goto nextproc;
931 }
932
933 /*
934 * Do not swapout a process if it is
935 * waiting on a critical event of some
936 * kind or there is a thread whose
937 * pageable memory may be accessed.
938 *
939 * This could be refined to support
940 * swapping out a thread.
941 */
942 if (!thread_safetoswapout(td)) {
943 thread_unlock(td);
944 goto nextproc;
945 }
946 /*
947 * If the system is under memory stress,
948 * or if we are swapping
949 * idle processes >= swap_idle_threshold2,
950 * then swap the process out.
951 */
952 if (((action & VM_SWAP_NORMAL) == 0) &&
953 (((action & VM_SWAP_IDLE) == 0) ||
954 (slptime < swap_idle_threshold2))) {
955 thread_unlock(td);
956 goto nextproc;
957 }
958
959 if (minslptime > slptime)
960 minslptime = slptime;
961 thread_unlock(td);
962 }
963
964 /*
965 * If the pageout daemon didn't free enough pages,
966 * or if this process is idle and the system is
967 * configured to swap proactively, swap it out.
968 */
969 if ((action & VM_SWAP_NORMAL) ||
970 ((action & VM_SWAP_IDLE) &&
971 (minslptime > swap_idle_threshold2))) {
972 if (swapout(p) == 0)
973 didswap++;
974 PROC_UNLOCK(p);
975 vm_map_unlock(&vm->vm_map);
976 vmspace_free(vm);
977 sx_sunlock(&allproc_lock);
978 goto retry;
979 }
980 }
981nextproc:
982 PROC_UNLOCK(p);
983 vm_map_unlock(&vm->vm_map);
984nextproc1:
985 vmspace_free(vm);
986 continue;
987 }
988 sx_sunlock(&allproc_lock);
989 /*
990 * If we swapped something out, and another process needed memory,
991 * then wakeup the sched process.
992 */
993 if (didswap)
994 wakeup(&proc0);
995}
996
997static void
998swapclear(p)
999 struct proc *p;
1000{
1001 struct thread *td;
1002
1003 PROC_LOCK_ASSERT(p, MA_OWNED);
1004
1005 FOREACH_THREAD_IN_PROC(p, td) {
1006 thread_lock(td);
1007 td->td_flags |= TDF_INMEM;
1008 td->td_flags &= ~TDF_SWAPINREQ;
1009 TD_CLR_SWAPPED(td);
1010 if (TD_CAN_RUN(td))
1011 if (setrunnable(td)) {
1012#ifdef INVARIANTS
1013 /*
1014 * XXX: We just cleared TDI_SWAPPED
1015 * above and set TDF_INMEM, so this
1016 * should never happen.
1017 */
1018 panic("not waking up swapper");
1019#endif
1020 }
1021 thread_unlock(td);
1022 }
1023 p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
1024 p->p_flag |= P_INMEM;
1025}
1026
1027static int
1028swapout(p)
1029 struct proc *p;
1030{
1031 struct thread *td;
1032
1033 PROC_LOCK_ASSERT(p, MA_OWNED);
1034#if defined(SWAP_DEBUG)
1035 printf("swapping out %d\n", p->p_pid);
1036#endif
1037
1038 /*
1039 * The states of this process and its threads may have changed
1040 * by now. Assuming that there is only one pageout daemon thread,
1041 * this process should still be in memory.
1042 */
1043 KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
1044 ("swapout: lost a swapout race?"));
1045
1046 /*
1047 * remember the process resident count
1048 */
1049 p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
1050 /*
1051 * Check and mark all threads before we proceed.
1052 */
1053 p->p_flag &= ~P_INMEM;
1054 p->p_flag |= P_SWAPPINGOUT;
1055 FOREACH_THREAD_IN_PROC(p, td) {
1056 thread_lock(td);
1057 if (!thread_safetoswapout(td)) {
1058 thread_unlock(td);
1059 swapclear(p);
1060 return (EBUSY);
1061 }
1062 td->td_flags &= ~TDF_INMEM;
1063 TD_SET_SWAPPED(td);
1064 thread_unlock(td);
1065 }
1066 td = FIRST_THREAD_IN_PROC(p);
1067 ++td->td_ru.ru_nswap;
1068 PROC_UNLOCK(p);
1069
1070 /*
1071 * This list is stable because all threads are now prevented from
1072 * running. The list is only modified in the context of a running
1073 * thread in this process.
1074 */
1075 FOREACH_THREAD_IN_PROC(p, td)
1076 vm_thread_swapout(td);
1077
1078 PROC_LOCK(p);
1079 p->p_flag &= ~P_SWAPPINGOUT;
1080 p->p_swtick = ticks;
1081 return (0);
1082}
1083#endif /* !NO_SWAPPING */