/*	$OpenBSD: uvm_page.c,v 1.86 2009/06/06 17:46:44 art Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * History
 */
UVMHIST_DECL(pghist);

/*
 * local prototypes
 */

static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(struct vm_page *pg)
{
	UVMHIST_FUNC("uvm_pageinsert"); UVMHIST_CALLED(pghist);

	KASSERT((pg->pg_flags & PG_TABLED) == 0);

	RB_INSERT(uobj_pgs, &pg->uobject->memt, pg);
	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(struct vm_page *pg)
{
	UVMHIST_FUNC("uvm_pageremove"); UVMHIST_CALLED(pghist);

	KASSERT(pg->pg_flags & PG_TABLED);
	/* object should be locked */
	RB_REMOVE(uobj_pgs, &pg->uobject->memt, pg);

	atomic_clearbits_int(&pg->pg_flags, PG_TABLED|PQ_AOBJ);
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->pg_version++;
}

int
uvm_pagecmp(struct vm_page *a, struct vm_page *b)
{
	return (a->offset < b->offset ? -1 : a->offset > b->offset);
}

RB_GENERATE(uobj_pgs, vm_page, fq.queues.tree, uvm_pagecmp);
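
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * uvm_pagecmp() keys each object's red-black tree on pg->offset, so a
 * lookup is a dummy-key search along the lines of
 *
 *	struct vm_page find, *pg;
 *
 *	find.offset = off;
 *	pg = RB_FIND(uobj_pgs, &uobj->memt, &find);
 *
 * which is what uvm_pagelookup() below does; "uobj" and "off" stand in
 * for a locked object and a page-aligned byte offset.
 */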

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray;
	int lcv, i;
	paddr_t paddr;
#if defined(UVMHIST)
	static struct uvm_history_ent pghistbuf[100];
#endif

	UVMHIST_FUNC("uvm_page_init");
	UVMHIST_INIT_STATIC(pghist, pghistbuf);
	UVMHIST_CALLED(pghist);

	/*
	 * init the page queues and page queue locks
	 */

	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	mtx_init(&uvm.fpageqlock, IPL_VM);
	uvm_pmr_init();

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));
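
	/*
	 * Editor's note (illustrative arithmetic, not in the original):
	 * with 4K pages and a hypothetical 120-byte struct vm_page,
	 * registering 1024 pages of RAM gives
	 *
	 *	pagecount = (1025 << 12) / (4096 + 120) == 995
	 *
	 * usable pages; the remainder is consumed by the vm_page array
	 * itself.
	 */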

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %ld page(s) in init\n",
			    (long)(n - pagecount));
			panic("uvm_page_init");  /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
#endif
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
			}
		}

		/* add pages to free pool */
		uvm_pmr_freepages(&vm_physmem[lcv].pgs[
		    vm_physmem[lcv].avail_start - vm_physmem[lcv].start],
		    vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init locks for kernel threads
	 */
	mtx_init(&uvm.aiodoned_lock, IPL_BIO);

	/*
	 * init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize(void)
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}
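
/*
 * Editor's note (illustrative, not in the original): for the common
 * uvmexp.pagesize of 4096 the loop above yields pagemask == 0xfff and
 * pageshift == 12; the (pagemask & pagesize) check rejects any size
 * that is not a power of two, e.g. 0x1800 (0x17ff & 0x1800 != 0).
 */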

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif /* PMAP_STEAL_MEMORY */
}
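
/*
 * Editor's note (hypothetical usage sketch, not in the original): MD
 * bootstrap code uses this to carve out wired, mapped memory before
 * the VM system is up, e.g. something like
 *
 *	initmsgbuf((caddr_t)uvm_pageboot_alloc(MSGBUFSIZE),
 *	    round_page(MSGBUFSIZE));
 *
 * where MSGBUFSIZE stands in for an MD constant.  such memory is never
 * returned and stays invisible to uvm_pagefree().
 */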

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
boolean_t uvm_page_physget_freelist(paddr_t *, int);

boolean_t
uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
{
	int lcv, x;
	UVMHIST_FUNC("uvm_page_physget_freelist"); UVMHIST_CALLED(pghist);

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		if (vm_physmem[lcv].free_list != freelist)
			continue;

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}

boolean_t
uvm_page_physget(paddr_t *paddrp)
{
	int i;
	UVMHIST_FUNC("uvm_page_physget"); UVMHIST_CALLED(pghist);

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == TRUE)
			return (TRUE);
	return (FALSE);
}
#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs (page frame numbers)
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int free_list)
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);

	if (start >= end)
		panic("uvm_page_physload: start >= end");

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;
		npages = end - start;  /* # of pages */
		pgs = (struct vm_page *)uvm_km_zalloc(kernel_map,
		    sizeof(struct vm_page) * npages);
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* init phys_addr and free_list, and free pages */
		for (lcv = 0, paddr = ptoa(start) ;
		    lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	return;
}
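
/*
 * Editor's note (hypothetical usage sketch, not in the original): MD
 * startup code typically hands each chunk of RAM to UVM with a call
 * like
 *
 *	uvm_page_physload(atop(seg_start), atop(seg_end),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 *
 * where seg_start/seg_end/avail_start/avail_end stand in for MD byte
 * addresses; per the comment above, every argument is a page frame
 * number, hence the atop() conversions.
 */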

#ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump(void); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump(void)
{
	int lcv;

	printf("rehash: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
		    (long long)vm_physmem[lcv].start,
		    (long long)vm_physmem[lcv].end,
		    (long long)vm_physmem[lcv].avail_start,
		    (long long)vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
}
#endif

void
uvm_shutdown(void)
{
#ifdef UVM_SWAP_ENCRYPT
	uvm_swap_finicrypt_all();
#endif
}

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in the object's page tree)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 * => policy decision: it is more important to pull a page off of the
 *	appropriate priority free list than it is to get a zero'd or
 *	unknown contents page.  This is because we live with the
 *	consequences of a bad free list decision for the entire
 *	lifetime of the page, e.g. if the page comes from memory that
 *	is slower to access.
 */

struct vm_page *
uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags, int strat, int free_list)
{
	struct pglist pgl;
	int pmr_flags;
	struct vm_page *pg;
	boolean_t use_reserve;
	UVMHIST_FUNC("uvm_pagealloc_strat"); UVMHIST_CALLED(pghist);

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon_proc);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	    !((curproc == uvm.pagedaemon_proc) ||
	    (curproc == syncerproc))))
		goto fail;

	pmr_flags = UVM_PLA_NOWAIT;
	if (flags & UVM_PGA_ZERO)
		pmr_flags |= UVM_PLA_ZERO;
	TAILQ_INIT(&pgl);
	if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0)
		goto fail;
	pg = TAILQ_FIRST(&pgl);
	KASSERT(pg != NULL);
	KASSERT(TAILQ_NEXT(pg, pageq) == NULL);

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->pg_flags = PG_BUSY|PG_FAKE;
	if (!(flags & UVM_PGA_ZERO))
		atomic_setbits_int(&pg->pg_flags, PG_CLEAN);
	if (anon) {
		anon->an_page = pg;
		atomic_setbits_int(&pg->pg_flags, PQ_ANON);
#ifdef UBC
		uvm_pgcnt_anon++;
#endif
	} else {
		if (obj)
			uvm_pageinsert(pg);
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	UVMHIST_LOG(pghist, "allocated pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);
	return(pg);

 fail:
	UVMHIST_LOG(pghist, "failed!", 0, 0, 0, 0);
	return (NULL);
}
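
/*
 * Editor's note (illustrative sketch, not in the original): a pager
 * that wants a zeroed page for object "uobj" at byte offset "off"
 * would do something like
 *
 *	pg = uvm_pagealloc_strat(uobj, off, NULL, UVM_PGA_ZERO,
 *	    UVM_PGA_STRAT_NORMAL, 0);
 *	if (pg == NULL)
 *		... wait for the pagedaemon and retry ...
 *
 * with uobj locked; the page comes back PG_BUSY|PG_FAKE, so the caller
 * is responsible for unbusying it.
 */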

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
{

	UVMHIST_FUNC("uvm_pagerealloc"); UVMHIST_CALLED(pghist);

	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->pg_version++;
		uvm_pageinsert(pg);
	}
}

/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(struct vm_page *pg)
{
	struct pglist pgl;
	int saved_loan_count = pg->loan_count;
	UVMHIST_FUNC("uvm_pagefree"); UVMHIST_CALLED(pghist);

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p", pg);
	}
#endif

	UVMHIST_LOG(pghist, "freeing pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->pg_flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.  the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		/* in case an anon takes over */
		if (saved_loan_count)
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]); we
		 * just return (when the last loan is dropped, then the page
		 * can be freed by whatever was holding the last loan).
		 */

		if (saved_loan_count)
			return;
	} else if (saved_loan_count && pg->uanon) {
		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		return;
	}
	KASSERT(saved_loan_count == 0);

	/*
	 * now remove the page from the queues
	 */

	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
	if (pg->uanon) {
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
#ifdef UBC
		uvm_pgcnt_anon--;
#endif
	}

	/*
	 * Clean page state bits.
	 */
	atomic_clearbits_int(&pg->pg_flags, PG_ZERO|PG_FAKE|PG_BUSY|
	    PG_RELEASED|PG_CLEAN|PG_CLEANCHK|PQ_ENCRYPT);
	/*
	 * Pmap flag cleaning.
	 * XXX: Shouldn't pmap do this?
	 */
	atomic_clearbits_int(&pg->pg_flags,
	    PG_PMAP0|PG_PMAP1|PG_PMAP2|PG_PMAP3);

#if defined(DIAGNOSTIC)
	if (pg->pg_flags != 0) {
		panic("uvm_pagefree: expected page %p pg_flags to be 0\n"
		    "uvm_pagefree: instead of pg->pg_flags = %x\n",
		    pg, pg->pg_flags);
	}
#endif
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	TAILQ_INIT(&pgl);
	TAILQ_INSERT_HEAD(&pgl, pg, pageq);
	uvm_pmr_freepageq(&pgl);

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;
}
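
/*
 * Editor's note (illustrative sketch, not in the original): per the
 * locking rules above, the usual free sequence is
 *
 *	uvm_lock_pageq();
 *	uvm_pagefree(pg);
 *	uvm_unlock_pageq();
 *
 * with the owning object (or anon) already locked; uvm_page_unbusy()
 * below does exactly this for PG_RELEASED object pages.
 */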

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
 */

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int i;
	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(pdhist);

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->pg_flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->pg_flags & PG_RELEASED) {
			UVMHIST_LOG(pdhist, "releasing pg %p", pg,0,0,0);
			uobj = pg->uobject;
			if (uobj != NULL) {
				uvm_lock_pageq();
				pmap_page_protect(pg, VM_PROT_NONE);
				/* XXX won't happen right now */
				if (pg->pg_flags & PQ_ANON)
					uao_dropswap(uobj,
					    pg->offset >> PAGE_SHIFT);
				uvm_pagefree(pg);
				uvm_unlock_pageq();
			} else {
				atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				uvm_anfree(pg->uanon);
			}
		} else {
			UVMHIST_LOG(pdhist, "unbusying pg %p", pg,0,0,0);
			atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}
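
/*
 * Editor's note (hypothetical sketch, not in the original): the
 * wakeup(pg) above pairs with waiters that mark a busy page wanted and
 * then sleep on the page's address, roughly
 *
 *	atomic_setbits_int(&pg->pg_flags, PG_WANTED);
 *	tsleep(pg, PVM, "pgwait", 0);
 *
 * where a real waiter would also drop the object lock before sleeping
 * and re-take it afterwards.
 */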

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(struct vm_page *pg, char *tag)
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => we do at least one iteration per call, if we are below the target.
 * => we loop until we either reach the target or whichqs indicates that
 *	there is a process ready to run.
 */
void
uvm_pageidlezero(void)
{
#if 0 /* Disabled for now. */
	struct vm_page *pg;
	struct pgfreelist *pgfl;
	int free_list;
	UVMHIST_FUNC("uvm_pageidlezero"); UVMHIST_CALLED(pghist);

	do {
		uvm_lock_fpageq();

		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			pgfl = &uvm.page_free[free_list];
			if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
			    PGFL_UNKNOWN])) != NULL)
				break;
		}

		if (pg == NULL) {
			/*
			 * No non-zero'd pages; don't bother trying again
			 * until we know we have non-zero'd pages free.
			 */
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
		uvmexp.free--;
		uvm_unlock_fpageq();

#ifdef PMAP_PAGEIDLEZERO
		if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
			/*
			 * The machine-dependent code detected some
			 * reason for us to abort zeroing pages,
			 * probably because there is a process now
			 * ready to run.
			 */
			uvm_lock_fpageq();
			TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
			    pg, pageq);
			uvmexp.free++;
			uvmexp.zeroaborts++;
			uvm_unlock_fpageq();
			return;
		}
#else
		/*
		 * XXX This will toast the cache unless the pmap_zero_page()
		 * XXX implementation does uncached access.
		 */
		pmap_zero_page(pg);
#endif
		atomic_setbits_int(&pg->pg_flags, PG_ZERO);

		uvm_lock_fpageq();
		TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
		uvmexp.free++;
		uvmexp.zeropages++;
		uvm_unlock_fpageq();
	} while (curcpu_is_idle());
#endif /* 0 */
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX > 1
/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
int
vm_physseg_find(paddr_t pframe, int *offp)
{

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	/* binary search for it */
	int	start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= vm_physmem[try].start) {
			/* was try correct? */
			if (pframe < vm_physmem[try].end) {
				if (offp)
					*offp = pframe - vm_physmem[try].start;
				return(try);            /* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);

#else
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		if (pframe >= vm_physmem[lcv].start &&
		    pframe < vm_physmem[lcv].end) {
			if (offp)
				*offp = pframe - vm_physmem[lcv].start;
			return(lcv);		   /* got it */
		}
	}
	return(-1);

#endif
}
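
/*
 * Editor's note (worked example, not in the original): with len == 5
 * and the target above the probe, try = start + 2 leaves exactly two
 * entries past "try"; the len-- "adjust" followed by the loop's
 * len /= 2 indeed yields 2, matching trunc((5 - 1) / 2).
 */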

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);

	return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
}
#endif /* VM_PHYSSEG_MAX > 1 */

/*
 * uvm_pagelookup: look up a page
 *
 * => caller should lock object to keep someone from pulling the page
 *	out from under it
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
	struct vm_page	find;

	find.offset = off;
	return (RB_FIND(uobj_pgs, &obj->memt, &find));
}

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */
void
uvm_pagewire(struct vm_page *pg)
{
	if (pg->wire_count == 0) {
		if (pg->pg_flags & PQ_ACTIVE) {
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active--;
		}
		if (pg->pg_flags & PQ_INACTIVE) {
			if (pg->pg_flags & PQ_SWAPBACKED)
				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
			else
				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
			uvmexp.inactive--;
		}
		uvmexp.wired++;
	}
	pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */
void
uvm_pageunwire(struct vm_page *pg)
{
	pg->wire_count--;
	if (pg->wire_count == 0) {
		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
		uvmexp.active++;
		atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.wired--;
	}
}
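
/*
 * Editor's note (illustrative sketch, not in the original): code that
 * must keep a page resident brackets its use with the pair above, e.g.
 *
 *	uvm_lock_pageq();
 *	uvm_pagewire(pg);
 *	uvm_unlock_pageq();
 *	... access the page, start DMA, etc ...
 *	uvm_lock_pageq();
 *	uvm_pageunwire(pg);
 *	uvm_unlock_pageq();
 *
 * wire counts nest, so each uvm_pagewire() needs a matching
 * uvm_pageunwire().
 */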

/*
 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 */
void
uvm_pagedeactivate(struct vm_page *pg)
{
	pmap_page_protect(pg, VM_PROT_NONE);

	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if ((pg->pg_flags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
		atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive++;
		pmap_clear_reference(pg);
		/*
		 * update the "clean" bit.  this isn't 100%
		 * accurate, and doesn't have to be.  we'll
		 * re-sync it after we zap all mappings when
		 * scanning the inactive list.
		 */
		if ((pg->pg_flags & PG_CLEAN) != 0 &&
		    pmap_is_modified(pg))
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	}
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */
void
uvm_pageactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}
	if (pg->wire_count == 0) {

		/*
		 * if page is already active, remove it from list so we
		 * can put it at tail.  if it wasn't active, then mark
		 * it active and bump active count
		 */
		if (pg->pg_flags & PQ_ACTIVE)
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		else {
			atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active++;
		}

		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
	}
}

/*
 * uvm_pagezero: zero fill a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagezero(struct vm_page *pg)
{
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	pmap_zero_page(pg);
}

/*
 * uvm_pagecopy: copy a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{
	atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
	pmap_copy_page(src, dst);
}

/*
 * uvm_page_lookup_freelist: look up the free list for the specified page
 */
int
uvm_page_lookup_freelist(struct vm_page *pg)
{
#if VM_PHYSSEG_MAX == 1
	return (vm_physmem[0].free_list);
#else
	int lcv;

	lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
	KASSERT(lcv != -1);
	return (vm_physmem[lcv].free_list);
#endif
}