vm_pageout.c revision 100415
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_pageout.c 100415 2002-07-20 20:58:46Z alc $
 */

/*
 * The proverbial page-out daemon.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_scan(int pass);
static int vm_pageout_free_page_calc(vm_size_t count);
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed = 0;	/* Event on which pageout daemon sleeps */
int vm_pageout_deficit = 0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed = 0; /* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int vm_swap_size;
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
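
/*
 * A zero in the stats tunables above and below means "compute a default
 * at boot"; vm_pageout() fills these in before the daemon's main loop
 * starts.
 */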
static int vm_pageout_stats_free_max = 0, vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t(vm_map_t, vm_object_t, vm_pindex_t, int);
static void vm_pageout_map_deactivate_pages(vm_map_t, vm_pindex_t);
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon(void);
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
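 *
 * ("Laundry" here means dirty pages sitting on the inactive queue,
 * waiting to be written back to their pager.)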
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late, and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(m)
	vm_page_t m;
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	GIANT_REQUIRED;

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying
	 * to align the clusters (which leaves sporadic
	 * out-of-order holes).  To solve this problem we do
	 * the reverse scan first and attempt to align our
	 * cluster, then do a forward scan if room remains.
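	 *
	 * A worked example with the default vm_pageout_page_count of 16:
	 * a page at pindex 37 seeds mc[16]; the reverse scan may pick up
	 * pindexes 36 down to 32, stopping there because 32 is a multiple
	 * of 16, and the forward scan can then extend the cluster through
	 * pindex 47, for at most 16 pages in all.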
	 */
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we setup for the start of
 * I/O (i.e. busy the page), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 */
int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	GIANT_REQUIRED;
	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
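	 *
	 * Write-protecting the pages here ensures that a concurrent
	 * modification faults and redirties the page instead of being
	 * silently lost behind the write that is already in flight.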
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count));
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	vm_page_lock_queues();
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so it doesn't clog the inactive list.  (We
			 * will try paging it out again later).
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				vm_page_protect(mt, VM_PROT_READ);
		}
	}
	vm_page_unlock_queues();
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
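 *
 *	The loop below also descends into backing_objects, so a single
 *	call can deactivate an entire shadow chain.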
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	vm_page_t p, next;
	int rcount;
	int remove_mode;

	GIANT_REQUIRED;
	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (pmap_resident_count(vm_map_pmap(map)) <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists_quick(vm_map_pmap(map), p)) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						vm_pageq_requeue(p);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					vm_pageq_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
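 *
 * The compromise: reclaim from the largest unshared object first, then
 * sweep the remaining map entries front to back.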
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	GIANT_REQUIRED;
	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			     (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired)
		pmap_remove(vm_map_pmap(map),
		    VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif /* !defined(NO_SWAPPING) */

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */
void
vm_pageout_page_free(vm_page_t m)
{
	vm_object_t object = m->object;
	int type = object->type;

	GIANT_REQUIRED;
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
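 *
 * On pass 0 at most vm_max_launder dirty pages are flushed; on later
 * passes maxlaunder is raised so the daemon can go all out.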
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int save_page_shortage;
	int save_inactive_count;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;
	int s;
	struct thread *td;

	GIANT_REQUIRED;
	/*
	 * Do whatever cleanup the pmap code can.
	 */
	pmap_collect();
	uma_reclaim();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;
	save_page_shortage = page_shortage;
	save_inactive_count = cnt.v_inactive_count;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * A held page may be undergoing I/O, so skip it.
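		 *
		 * Requeueing keeps the scan moving while the hold drains;
		 * addl_page_shortage accounts for the page we could not
		 * process.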
		 */
		if (m->hold_count) {
			vm_pageq_requeue(m);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given the upper
		 * level VM system not knowing anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will less
		 * likely place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.  This
		 * effectively frees them.
		 */
		} else if (m->dirty == 0) {
			vm_page_lock_queues();
			vm_page_cache(m);
			vm_page_unlock_queues();
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_pageq_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_pageq_requeue(m);
				continue;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock, we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
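			 *
			 * (LK_TIMELOCK in the vget() call below is what
			 * bounds that wait.)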
			 */
			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				mp = NULL;
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ|LK_TIMELOCK, curthread)) {
					++pageout_lock_miss;
					vn_finished_write(mp);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the
				 * page back onto the end of the queue;
				 * statistics are more correct this way.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					vm_pageq_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			s = splvm();
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			splx(s);
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			s = splvm();
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			splx(s);
			if (vp) {
				vput(vp);
				vn_finished_write(mp);
			}
		}
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
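	 *
	 * That is: the remaining paging deficit, plus the gap between the
	 * inactive queue and its target, plus the pages skipped during the
	 * inactive scan.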
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	vm_page_lock_queues();
	/*
	 * Scan the active queue for things we can deactivate. We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			vm_pageq_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_pageq_requeue(m);
			}
		}
		m = next;
	}
	vm_page_unlock_queues();
	s = splvm();

	/*
	 * We try to maintain some *really* free pages, this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
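	 *
	 * The rover below strides through the PQ_CACHE queues by PQ_PRIME2
	 * so that the frees are spread across the page colors.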
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon, and kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * If we are out of swap and were not able to reach our paging
	 * target, kill the largest process.
	 *
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	if ((vm_swap_size < 64 && vm_page_count_min()) ||
	    (swap_pager_full && vm_paging_target() > 0)) {
#if 0
	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
#endif
		bigproc = NULL;
		bigsize = 0;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			int breakout;
			/*
			 * If this process is already locked, skip it.
			 */
			if (PROC_TRYLOCK(p) == 0)
				continue;
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.  Check all the threads individually.
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (td->td_state != TDS_RUNQ &&
				    td->td_state != TDS_RUNNING &&
				    td->td_state != TDS_SLP) {
					breakout = 1;
					break;
				}
			}
			if (breakout) {
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				continue;
			}
			mtx_unlock_spin(&sched_lock);
			/*
			 * get the process size
			 */
			size = vmspace_resident_count(p->p_vmspace) +
				vmspace_swap_count(p->p_vmspace);
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				if (bigproc != NULL)
					PROC_UNLOCK(bigproc);
				bigproc = p;
				bigsize = size;
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (bigproc != NULL) {
			struct ksegrp *kg;
			killproc(bigproc, "out of swap space");
			mtx_lock_spin(&sched_lock);
			FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
				kg->kg_estcpu = 0;
				kg->kg_nice = PRIO_MIN; /* XXXKSE ??? */
				resetpriority(kg);
			}
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
}

/*
 * This routine tries to maintain the pseudo LRU active queue,
 * so that during long periods of time where there is no paging,
 * some statistic accumulation still occurs.  This code
 * helps the situation where paging just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;
	int s0;

	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	s0 = splvm();
	vm_page_lock_queues();
	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_pageq_requeue(m);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM-wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				vm_pageq_requeue(m);
			}
		}

		m = next;
	}
	vm_page_unlock_queues();
	splx(s0);
}

static int
vm_pageout_free_page_calc(count)
vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
		cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;
	return 1;
}

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	int pass;

	mtx_lock(&Giant);

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
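	 *
	 * (The v_free_count tests below scale these targets down on
	 * small-memory machines.)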
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int error;
		int s = splvm();

		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		if (vm_pages_needed && !vm_page_count_min()) {
			if (vm_paging_needed() <= 0)
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				tsleep(&vm_pages_needed, PVM,
				       "psleep", hz/2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
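			 *
			 * (A daemon that was in multi-pass distress drops
			 * back to pass 1, keeping unlimited laundering for
			 * one more scan before settling at 0.)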
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = tsleep(&vm_pages_needed, PVM,
				    "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				pass = 0;
				vm_pageout_page_stats();
				continue;
			}
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		splx(s);
		vm_pageout_scan(pass);
		vm_pageout_deficit = 0;
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;
	int breakout;
	struct thread *td;

	mtx_lock(&Giant);
	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits or if
		 * the process is swapped out -- deactivate pages
		 */
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (td->td_state != TDS_RUNQ &&
				    td->td_state != TDS_RUNNING &&
				    td->td_state != TDS_SLP) {
					breakout = 1;
					break;
				}
			}
			if (breakout) {
				mtx_unlock_spin(&sched_lock);
				continue;
			}
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
				p->p_rlimit[RLIMIT_RSS].rlim_max));

			/*
			 * let processes that are swapped out really be
			 * swapped out; set the limit to nothing (this will
			 * force a swap-out.)
			 */
			if ((p->p_sflag & PS_INMEM) == 0)
				limit = 0;	/* XXX */
			mtx_unlock_spin(&sched_lock);

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
		sx_sunlock(&allproc_lock);
	}
}
#endif /* !defined(NO_SWAPPING) */