/*	$OpenBSD: uvm_page.c,v 1.87 2009/06/07 02:01:54 oga Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * History
 */
UVMHIST_DECL(pghist);

/*
 * local prototypes
 */

static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(struct vm_page *pg)
{
	UVMHIST_FUNC("uvm_pageinsert"); UVMHIST_CALLED(pghist);

	KASSERT((pg->pg_flags & PG_TABLED) == 0);

	RB_INSERT(uobj_pgs, &pg->uobject->memt, pg);
	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(struct vm_page *pg)
{
	UVMHIST_FUNC("uvm_pageremove"); UVMHIST_CALLED(pghist);

	KASSERT(pg->pg_flags & PG_TABLED);
	/* object should be locked */
	RB_REMOVE(uobj_pgs, &pg->uobject->memt, pg);

	atomic_clearbits_int(&pg->pg_flags, PG_TABLED|PQ_AOBJ);
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->pg_version++;
}

int
uvm_pagecmp(struct vm_page *a, struct vm_page *b)
{
	return (a->offset < b->offset ? -1 : a->offset > b->offset);
}

RB_GENERATE(uobj_pgs, vm_page, fq.queues.tree, uvm_pagecmp);
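
/*
 * Illustrative note (not part of the original source): with uvm_pagecmp()
 * ordering pages by their offset within the object, a caller can look a
 * page up by building a key on the stack and using RB_FIND, which is
 * exactly what uvm_pagelookup() later in this file does, e.g.:
 *
 *	struct vm_page find, *pg;
 *
 *	find.offset = off;
 *	pg = RB_FIND(uobj_pgs, &obj->memt, &find);
 */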

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray;
	int lcv, i;
	paddr_t paddr;
#if defined(UVMHIST)
	static struct uvm_history_ent pghistbuf[100];
#endif

	UVMHIST_FUNC("uvm_page_init");
	UVMHIST_INIT_STATIC(pghist, pghistbuf);
	UVMHIST_CALLED(pghist);

	/*
	 * init the page queues and page queue locks
	 */

	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	mtx_init(&uvm.fpageqlock, IPL_VM);
	uvm_pmr_init();

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));
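
	/*
	 * Worked example (illustrative figures only; sizeof(struct vm_page)
	 * differs between architectures): with PAGE_SIZE = 4096, a
	 * hypothetical sizeof(struct vm_page) = 96 and freepages = 262143
	 * (roughly 1GB of RAM), the formula above gives
	 *
	 *	pagecount = (262144 << 12) / (4096 + 96) = 256140
	 *
	 * i.e. a little under freepages, since the vm_page structures
	 * themselves consume part of the stolen memory.
	 */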

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %ld page(s) in init\n",
			    (long)(n - pagecount));
			panic("uvm_page_init");  /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
#endif
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
			}
		}

		/* add pages to free pool */
		uvm_pmr_freepages(&vm_physmem[lcv].pgs[
		    vm_physmem[lcv].avail_start - vm_physmem[lcv].start],
		    vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init locks for kernel threads
	 */
	mtx_init(&uvm.aiodoned_lock, IPL_BIO);

	/*
	 * init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize(void)
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}
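
/*
 * Worked example (illustrative): with uvmexp.pagesize = 4096 the loop in
 * uvm_setpagesize() stops at uvmexp.pageshift = 12 (1 << 12 == 4096) and
 * uvmexp.pagemask ends up as 0xfff.  A non-power-of-two size such as 4095
 * yields pagemask = 4094, and 4094 & 4095 != 0, so the panic fires.
 */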
389 */ 390 if (initialized == FALSE) { 391 pmap_virtual_space(&virtual_space_start, &virtual_space_end); 392 393 /* round it the way we like it */ 394 virtual_space_start = round_page(virtual_space_start); 395 virtual_space_end = trunc_page(virtual_space_end); 396 397 initialized = TRUE; 398 } 399 400 /* 401 * allocate virtual memory for this request 402 */ 403 if (virtual_space_start == virtual_space_end || 404 (virtual_space_end - virtual_space_start) < size) 405 panic("uvm_pageboot_alloc: out of virtual space"); 406 407 addr = virtual_space_start; 408 409#ifdef PMAP_GROWKERNEL 410 /* 411 * If the kernel pmap can't map the requested space, 412 * then allocate more resources for it. 413 */ 414 if (uvm_maxkaddr < (addr + size)) { 415 uvm_maxkaddr = pmap_growkernel(addr + size); 416 if (uvm_maxkaddr < (addr + size)) 417 panic("uvm_pageboot_alloc: pmap_growkernel() failed"); 418 } 419#endif 420 421 virtual_space_start += size; 422 423 /* 424 * allocate and mapin physical pages to back new virtual pages 425 */ 426 427 for (vaddr = round_page(addr) ; vaddr < addr + size ; 428 vaddr += PAGE_SIZE) { 429 430 if (!uvm_page_physget(&paddr)) 431 panic("uvm_pageboot_alloc: out of memory"); 432 433 /* 434 * Note this memory is no longer managed, so using 435 * pmap_kenter is safe. 436 */ 437 pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE); 438 } 439 pmap_update(pmap_kernel()); 440 return(addr); 441#endif /* PMAP_STEAL_MEMORY */ 442} 443 444#if !defined(PMAP_STEAL_MEMORY) 445/* 446 * uvm_page_physget: "steal" one page from the vm_physmem structure. 447 * 448 * => attempt to allocate it off the end of a segment in which the "avail" 449 * values match the start/end values. if we can't do that, then we 450 * will advance both values (making them equal, and removing some 451 * vm_page structures from the non-avail area). 452 * => return false if out of memory. 453 */ 454 455/* subroutine: try to allocate from memory chunks on the specified freelist */ 456boolean_t uvm_page_physget_freelist(paddr_t *, int); 457 458boolean_t 459uvm_page_physget_freelist(paddr_t *paddrp, int freelist) 460{ 461 int lcv, x; 462 UVMHIST_FUNC("uvm_page_physget_freelist"); UVMHIST_CALLED(pghist); 463 464 /* pass 1: try allocating from a matching end */ 465#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \ 466 (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH) 467 for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--) 468#else 469 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 470#endif 471 { 472 473 if (uvm.page_init_done == TRUE) 474 panic("uvm_page_physget: called _after_ bootstrap"); 475 476 if (vm_physmem[lcv].free_list != freelist) 477 continue; 478 479 /* try from front */ 480 if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start && 481 vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) { 482 *paddrp = ptoa(vm_physmem[lcv].avail_start); 483 vm_physmem[lcv].avail_start++; 484 vm_physmem[lcv].start++; 485 /* nothing left? nuke it */ 486 if (vm_physmem[lcv].avail_start == 487 vm_physmem[lcv].end) { 488 if (vm_nphysseg == 1) 489 panic("uvm_page_physget: out of memory!"); 490 vm_nphysseg--; 491 for (x = lcv ; x < vm_nphysseg ; x++) 492 /* structure copy */ 493 vm_physmem[x] = vm_physmem[x+1]; 494 } 495 return (TRUE); 496 } 497 498 /* try from rear */ 499 if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end && 500 vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) { 501 *paddrp = ptoa(vm_physmem[lcv].avail_end - 1); 502 vm_physmem[lcv].avail_end--; 503 vm_physmem[lcv].end--; 504 /* nothing left? 

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
boolean_t uvm_page_physget_freelist(paddr_t *, int);

boolean_t
uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
{
	int lcv, x;
	UVMHIST_FUNC("uvm_page_physget_freelist"); UVMHIST_CALLED(pghist);

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		if (vm_physmem[lcv].free_list != freelist)
			continue;

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
				    panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
				    panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}

boolean_t
uvm_page_physget(paddr_t *paddrp)
{
	int i;
	UVMHIST_FUNC("uvm_page_physget"); UVMHIST_CALLED(pghist);

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == TRUE)
			return (TRUE);
	return (FALSE);
}
#endif /* PMAP_STEAL_MEMORY */
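
/*
 * Illustrative note: the loop in uvm_page_physget() above walks the free
 * lists in ascending index order, so the lowest-numbered free list that
 * can satisfy the request wins.  On most configurations index 0 is
 * VM_FREELIST_DEFAULT (the exact set of free lists is machine-dependent),
 * so ordinary RAM is consumed before any special-purpose lists the MD
 * code may have declared.
 */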
621 */ 622 paddr_t paddr; 623 624 npages = end - start; /* # of pages */ 625 626 pgs = (struct vm_page *)uvm_km_zalloc(kernel_map, 627 sizeof(struct vm_page) * npages); 628 if (pgs == NULL) { 629 printf("uvm_page_physload: can not malloc vm_page " 630 "structs for segment\n"); 631 printf("\tignoring 0x%lx -> 0x%lx\n", start, end); 632 return; 633 } 634 635 /* init phys_addr and free pages, XXX uvmexp.npages */ 636 for (lcv = 0, paddr = ptoa(start); lcv < npages; 637 lcv++, paddr += PAGE_SIZE) { 638 pgs[lcv].phys_addr = paddr; 639#ifdef __HAVE_VM_PAGE_MD 640 VM_MDPAGE_INIT(&pgs[lcv]); 641#endif 642 if (atop(paddr) >= avail_start && 643 atop(paddr) <= avail_end) { 644 if (flags & PHYSLOAD_DEVICE) { 645 atomic_setbits_int(&pgs[lcv].pg_flags, 646 PG_DEV); 647 pgs[lcv].wire_count = 1; 648 } else { 649#if defined(VM_PHYSSEG_NOADD) 650 panic("uvm_page_physload: tried to add RAM after vm_mem_init"); 651#else 652 uvm_pagefree(&pgs[lcv]); 653#endif 654 } 655 } 656 } 657 /* XXXCDC: incomplete: need to update uvmexp.free, what else? */ 658 /* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */ 659 } else { 660 661 /* gcc complains if these don't get init'd */ 662 pgs = NULL; 663 npages = 0; 664 665 } 666 667 /* 668 * now insert us in the proper place in vm_physmem[] 669 */ 670 671#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM) 672 673 /* random: put it at the end (easy!) */ 674 ps = &vm_physmem[vm_nphysseg]; 675 676#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH) 677 678 { 679 int x; 680 /* sort by address for binary search */ 681 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 682 if (start < vm_physmem[lcv].start) 683 break; 684 ps = &vm_physmem[lcv]; 685 /* move back other entries, if necessary ... */ 686 for (x = vm_nphysseg ; x > lcv ; x--) 687 /* structure copy */ 688 vm_physmem[x] = vm_physmem[x - 1]; 689 } 690 691#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) 692 693 { 694 int x; 695 /* sort by largest segment first */ 696 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 697 if ((end - start) > 698 (vm_physmem[lcv].end - vm_physmem[lcv].start)) 699 break; 700 ps = &vm_physmem[lcv]; 701 /* move back other entries, if necessary ... */ 702 for (x = vm_nphysseg ; x > lcv ; x--) 703 /* structure copy */ 704 vm_physmem[x] = vm_physmem[x - 1]; 705 } 706 707#else 708 709 panic("uvm_page_physload: unknown physseg strategy selected!"); 710 711#endif 712 713 ps->start = start; 714 ps->end = end; 715 ps->avail_start = avail_start; 716 ps->avail_end = avail_end; 717 if (preload) { 718 ps->pgs = NULL; 719 } else { 720 ps->pgs = pgs; 721 ps->lastpg = pgs + npages - 1; 722 } 723 ps->free_list = free_list; 724 vm_nphysseg++; 725 726 /* 727 * done! 
728 */ 729 730 return; 731} 732 733#ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */ 734 735void uvm_page_physdump(void); /* SHUT UP GCC */ 736 737/* call from DDB */ 738void 739uvm_page_physdump(void) 740{ 741 int lcv; 742 743 printf("rehash: physical memory config [segs=%d of %d]:\n", 744 vm_nphysseg, VM_PHYSSEG_MAX); 745 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 746 printf("0x%llx->0x%llx [0x%llx->0x%llx]\n", 747 (long long)vm_physmem[lcv].start, 748 (long long)vm_physmem[lcv].end, 749 (long long)vm_physmem[lcv].avail_start, 750 (long long)vm_physmem[lcv].avail_end); 751 printf("STRATEGY = "); 752 switch (VM_PHYSSEG_STRAT) { 753 case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break; 754 case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break; 755 case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break; 756 default: printf("<<UNKNOWN>>!!!!\n"); 757 } 758} 759#endif 760 761void 762uvm_shutdown(void) 763{ 764#ifdef UVM_SWAP_ENCRYPT 765 uvm_swap_finicrypt_all(); 766#endif 767} 768 769/* 770 * uvm_pagealloc_strat: allocate vm_page from a particular free list. 771 * 772 * => return null if no pages free 773 * => wake up pagedaemon if number of free pages drops below low water mark 774 * => if obj != NULL, obj must be locked (to put in hash) 775 * => if anon != NULL, anon must be locked (to put in anon) 776 * => only one of obj or anon can be non-null 777 * => caller must activate/deactivate page if it is not wired. 778 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL. 779 * => policy decision: it is more important to pull a page off of the 780 * appropriate priority free list than it is to get a zero'd or 781 * unknown contents page. This is because we live with the 782 * consequences of a bad free list decision for the entire 783 * lifetime of the page, e.g. if the page comes from memory that 784 * is slower to access. 785 */ 786 787struct vm_page * 788uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon, 789 int flags, int strat, int free_list) 790{ 791 struct pglist pgl; 792 int pmr_flags; 793 struct vm_page *pg; 794 boolean_t use_reserve; 795 UVMHIST_FUNC("uvm_pagealloc_strat"); UVMHIST_CALLED(pghist); 796 797 KASSERT(obj == NULL || anon == NULL); 798 KASSERT(off == trunc_page(off)); 799 800 /* 801 * check to see if we need to generate some free pages waking 802 * the pagedaemon. 803 */ 804 if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin || 805 ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg && 806 uvmexp.inactive < uvmexp.inactarg)) 807 wakeup(&uvm.pagedaemon_proc); 808 809 /* 810 * fail if any of these conditions is true: 811 * [1] there really are no free pages, or 812 * [2] only kernel "reserved" pages remain and 813 * the page isn't being allocated to a kernel object. 814 * [3] only pagedaemon "reserved" pages remain and 815 * the requestor isn't the pagedaemon. 
816 */ 817 818 use_reserve = (flags & UVM_PGA_USERESERVE) || 819 (obj && UVM_OBJ_IS_KERN_OBJECT(obj)); 820 if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) || 821 (uvmexp.free <= uvmexp.reserve_pagedaemon && 822 !((curproc == uvm.pagedaemon_proc) || 823 (curproc == syncerproc)))) 824 goto fail; 825 826 pmr_flags = UVM_PLA_NOWAIT; 827 if (flags & UVM_PGA_ZERO) 828 pmr_flags |= UVM_PLA_ZERO; 829 TAILQ_INIT(&pgl); 830 if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0) 831 goto fail; 832 pg = TAILQ_FIRST(&pgl); 833 KASSERT(pg != NULL); 834 KASSERT(TAILQ_NEXT(pg, pageq) == NULL); 835 836 pg->offset = off; 837 pg->uobject = obj; 838 pg->uanon = anon; 839 KASSERT((pg->pg_flags & PG_DEV) == 0); 840 pg->pg_flags = PG_BUSY|PG_FAKE; 841 if (!(flags & UVM_PGA_ZERO)) 842 atomic_setbits_int(&pg->pg_flags, PG_CLEAN); 843 if (anon) { 844 anon->an_page = pg; 845 atomic_setbits_int(&pg->pg_flags, PQ_ANON); 846 } else { 847 if (obj) 848 uvm_pageinsert(pg); 849 } 850#if defined(UVM_PAGE_TRKOWN) 851 pg->owner_tag = NULL; 852#endif 853 UVM_PAGE_OWN(pg, "new alloc"); 854 855 UVMHIST_LOG(pghist, "allocated pg %p/%lx", pg, 856 (u_long)VM_PAGE_TO_PHYS(pg), 0, 0); 857 return(pg); 858 859 fail: 860 UVMHIST_LOG(pghist, "failed!", 0, 0, 0, 0); 861 return (NULL); 862} 863 864/* 865 * uvm_pagerealloc: reallocate a page from one object to another 866 * 867 * => both objects must be locked 868 */ 869 870void 871uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff) 872{ 873 874 UVMHIST_FUNC("uvm_pagerealloc"); UVMHIST_CALLED(pghist); 875 876 /* 877 * remove it from the old object 878 */ 879 880 if (pg->uobject) { 881 uvm_pageremove(pg); 882 } 883 884 /* 885 * put it in the new object 886 */ 887 888 if (newobj) { 889 pg->uobject = newobj; 890 pg->offset = newoff; 891 pg->pg_version++; 892 uvm_pageinsert(pg); 893 } 894} 895 896 897/* 898 * uvm_pagefree: free page 899 * 900 * => erase page's identity (i.e. remove from object) 901 * => put page on free list 902 * => caller must lock owning object (either anon or uvm_object) 903 * => caller must lock page queues 904 * => assumes all valid mappings of pg are gone 905 */ 906 907void 908uvm_pagefree(struct vm_page *pg) 909{ 910 struct pglist pgl; 911 int saved_loan_count = pg->loan_count; 912 UVMHIST_FUNC("uvm_pagefree"); UVMHIST_CALLED(pghist); 913 914#ifdef DEBUG 915 if (pg->uobject == (void *)0xdeadbeef && 916 pg->uanon == (void *)0xdeadbeef) { 917 panic("uvm_pagefree: freeing free page %p", pg); 918 } 919#endif 920 921 UVMHIST_LOG(pghist, "freeing pg %p/%lx", pg, 922 (u_long)VM_PAGE_TO_PHYS(pg), 0, 0); 923 KASSERT((pg->pg_flags & PG_DEV) == 0); 924 925 /* 926 * if the page was an object page (and thus "TABLED"), remove it 927 * from the object. 928 */ 929 930 if (pg->pg_flags & PG_TABLED) { 931 932 /* 933 * if the object page is on loan we are going to drop ownership. 934 * it is possible that an anon will take over as owner for this 935 * page later on. the anon will want a !PG_CLEAN page so that 936 * it knows it needs to allocate swap if it wants to page the 937 * page out. 938 */ 939 940 /* in case an anon takes over */ 941 if (saved_loan_count) 942 atomic_clearbits_int(&pg->pg_flags, PG_CLEAN); 943 uvm_pageremove(pg); 944 945 /* 946 * if our page was on loan, then we just lost control over it 947 * (in fact, if it was loaned to an anon, the anon may have 948 * already taken over ownership of the page by now and thus 949 * changed the loan_count [e.g. 

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
{

	UVMHIST_FUNC("uvm_pagerealloc"); UVMHIST_CALLED(pghist);

	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->pg_version++;
		uvm_pageinsert(pg);
	}
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(struct vm_page *pg)
{
	struct pglist pgl;
	int saved_loan_count = pg->loan_count;
	UVMHIST_FUNC("uvm_pagefree"); UVMHIST_CALLED(pghist);

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p", pg);
	}
#endif

	UVMHIST_LOG(pghist, "freeing pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);
	KASSERT((pg->pg_flags & PG_DEV) == 0);

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->pg_flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.  the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		/* in case an anon takes over */
		if (saved_loan_count)
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */

		if (saved_loan_count)
			return;
	} else if (saved_loan_count && pg->uanon) {
		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		return;
	}
	KASSERT(saved_loan_count == 0);

	/*
	 * now remove the page from the queues
	 */

	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
	if (pg->uanon) {
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
#ifdef UBC
		uvm_pgcnt_anon--;
#endif
	}

	/*
	 * Clean page state bits.
	 */
	atomic_clearbits_int(&pg->pg_flags, PG_ZERO|PG_FAKE|PG_BUSY|
	    PG_RELEASED|PG_CLEAN|PG_CLEANCHK|PQ_ENCRYPT);
	/*
	 * Pmap flag cleaning.
	 * XXX: Shouldn't pmap do this?
	 */
	atomic_clearbits_int(&pg->pg_flags,
	    PG_PMAP0|PG_PMAP1|PG_PMAP2|PG_PMAP3);

#if defined(DIAGNOSTIC)
	if (pg->pg_flags != 0) {
		panic("uvm_pagefree: expected page %p pg_flags to be 0\n"
		    "uvm_pagefree: instead of pg->pg_flags = %x\n",
		    VM_PAGE_TO_PHYS(pg), pg->pg_flags);
	}
#endif
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	TAILQ_INIT(&pgl);
	TAILQ_INSERT_HEAD(&pgl, pg, pageq);
	uvm_pmr_freepageq(&pgl);

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;
}
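
/*
 * Illustrative caller sketch (hypothetical; mirrors what uvm_page_unbusy()
 * below does for PG_RELEASED object pages): freeing an object page
 * requires the owning object lock and the page queue lock, and any
 * remaining mappings should be removed first:
 *
 *	uvm_lock_pageq();
 *	pmap_page_protect(pg, VM_PROT_NONE);
 *	uvm_pagefree(pg);
 *	uvm_unlock_pageq();
 */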
1044 */ 1045 1046void 1047uvm_page_unbusy(struct vm_page **pgs, int npgs) 1048{ 1049 struct vm_page *pg; 1050 struct uvm_object *uobj; 1051 int i; 1052 UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(pdhist); 1053 1054 for (i = 0; i < npgs; i++) { 1055 pg = pgs[i]; 1056 1057 if (pg == NULL || pg == PGO_DONTCARE) { 1058 continue; 1059 } 1060 if (pg->pg_flags & PG_WANTED) { 1061 wakeup(pg); 1062 } 1063 if (pg->pg_flags & PG_RELEASED) { 1064 UVMHIST_LOG(pdhist, "releasing pg %p", pg,0,0,0); 1065 uobj = pg->uobject; 1066 if (uobj != NULL) { 1067 uvm_lock_pageq(); 1068 pmap_page_protect(pg, VM_PROT_NONE); 1069 /* XXX won't happen right now */ 1070 if (pg->pg_flags & PQ_ANON) 1071 uao_dropswap(uobj, 1072 pg->offset >> PAGE_SHIFT); 1073 uvm_pagefree(pg); 1074 uvm_unlock_pageq(); 1075 } else { 1076 atomic_clearbits_int(&pg->pg_flags, PG_BUSY); 1077 UVM_PAGE_OWN(pg, NULL); 1078 uvm_anfree(pg->uanon); 1079 } 1080 } else { 1081 UVMHIST_LOG(pdhist, "unbusying pg %p", pg,0,0,0); 1082 atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY); 1083 UVM_PAGE_OWN(pg, NULL); 1084 } 1085 } 1086} 1087 1088#if defined(UVM_PAGE_TRKOWN) 1089/* 1090 * uvm_page_own: set or release page ownership 1091 * 1092 * => this is a debugging function that keeps track of who sets PG_BUSY 1093 * and where they do it. it can be used to track down problems 1094 * such a process setting "PG_BUSY" and never releasing it. 1095 * => page's object [if any] must be locked 1096 * => if "tag" is NULL then we are releasing page ownership 1097 */ 1098void 1099uvm_page_own(struct vm_page *pg, char *tag) 1100{ 1101 /* gain ownership? */ 1102 if (tag) { 1103 if (pg->owner_tag) { 1104 printf("uvm_page_own: page %p already owned " 1105 "by proc %d [%s]\n", pg, 1106 pg->owner, pg->owner_tag); 1107 panic("uvm_page_own"); 1108 } 1109 pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1; 1110 pg->owner_tag = tag; 1111 return; 1112 } 1113 1114 /* drop ownership */ 1115 if (pg->owner_tag == NULL) { 1116 printf("uvm_page_own: dropping ownership of an non-owned " 1117 "page (%p)\n", pg); 1118 panic("uvm_page_own"); 1119 } 1120 pg->owner_tag = NULL; 1121 return; 1122} 1123#endif 1124 1125/* 1126 * uvm_pageidlezero: zero free pages while the system is idle. 1127 * 1128 * => we do at least one iteration per call, if we are below the target. 1129 * => we loop until we either reach the target or whichqs indicates that 1130 * there is a process ready to run. 1131 */ 1132void 1133uvm_pageidlezero(void) 1134{ 1135#if 0 /* Disabled for now. */ 1136 struct vm_page *pg; 1137 struct pgfreelist *pgfl; 1138 int free_list; 1139 UVMHIST_FUNC("uvm_pageidlezero"); UVMHIST_CALLED(pghist); 1140 1141 do { 1142 uvm_lock_fpageq(); 1143 1144 if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) { 1145 uvm.page_idle_zero = FALSE; 1146 uvm_unlock_fpageq(); 1147 return; 1148 } 1149 1150 for (free_list = 0; free_list < VM_NFREELIST; free_list++) { 1151 pgfl = &uvm.page_free[free_list]; 1152 if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[ 1153 PGFL_UNKNOWN])) != NULL) 1154 break; 1155 } 1156 1157 if (pg == NULL) { 1158 /* 1159 * No non-zero'd pages; don't bother trying again 1160 * until we know we have non-zero'd pages free. 
1161 */ 1162 uvm.page_idle_zero = FALSE; 1163 uvm_unlock_fpageq(); 1164 return; 1165 } 1166 1167 TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq); 1168 uvmexp.free--; 1169 uvm_unlock_fpageq(); 1170 1171#ifdef PMAP_PAGEIDLEZERO 1172 if (PMAP_PAGEIDLEZERO(pg) == FALSE) { 1173 /* 1174 * The machine-dependent code detected some 1175 * reason for us to abort zeroing pages, 1176 * probably because there is a process now 1177 * ready to run. 1178 */ 1179 uvm_lock_fpageq(); 1180 TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN], 1181 pg, pageq); 1182 uvmexp.free++; 1183 uvmexp.zeroaborts++; 1184 uvm_unlock_fpageq(); 1185 return; 1186 } 1187#else 1188 /* 1189 * XXX This will toast the cache unless the pmap_zero_page() 1190 * XXX implementation does uncached access. 1191 */ 1192 pmap_zero_page(pg); 1193#endif 1194 atomic_setbits_int(&pg->pg_flags, PG_ZERO); 1195 1196 uvm_lock_fpageq(); 1197 TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq); 1198 uvmexp.free++; 1199 uvmexp.zeropages++; 1200 uvm_unlock_fpageq(); 1201 } while (curcpu_is_idle()); 1202#endif /* 0 */ 1203} 1204 1205/* 1206 * when VM_PHYSSEG_MAX is 1, we can simplify these functions 1207 */ 1208 1209#if VM_PHYSSEG_MAX > 1 1210/* 1211 * vm_physseg_find: find vm_physseg structure that belongs to a PA 1212 */ 1213int 1214vm_physseg_find(paddr_t pframe, int *offp) 1215{ 1216 1217#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH) 1218 /* binary search for it */ 1219 int start, len, try; 1220 1221 /* 1222 * if try is too large (thus target is less than than try) we reduce 1223 * the length to trunc(len/2) [i.e. everything smaller than "try"] 1224 * 1225 * if the try is too small (thus target is greater than try) then 1226 * we set the new start to be (try + 1). this means we need to 1227 * reduce the length to (round(len/2) - 1). 1228 * 1229 * note "adjust" below which takes advantage of the fact that 1230 * (round(len/2) - 1) == trunc((len - 1) / 2) 1231 * for any value of len we may have 1232 */ 1233 1234 for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) { 1235 try = start + (len / 2); /* try in the middle */ 1236 1237 /* start past our try? */ 1238 if (pframe >= vm_physmem[try].start) { 1239 /* was try correct? */ 1240 if (pframe < vm_physmem[try].end) { 1241 if (offp) 1242 *offp = pframe - vm_physmem[try].start; 1243 return(try); /* got it */ 1244 } 1245 start = try + 1; /* next time, start here */ 1246 len--; /* "adjust" */ 1247 } else { 1248 /* 1249 * pframe before try, just reduce length of 1250 * region, done in "for" loop 1251 */ 1252 } 1253 } 1254 return(-1); 1255 1256#else 1257 /* linear search for it */ 1258 int lcv; 1259 1260 for (lcv = 0; lcv < vm_nphysseg; lcv++) { 1261 if (pframe >= vm_physmem[lcv].start && 1262 pframe < vm_physmem[lcv].end) { 1263 if (offp) 1264 *offp = pframe - vm_physmem[lcv].start; 1265 return(lcv); /* got it */ 1266 } 1267 } 1268 return(-1); 1269 1270#endif 1271} 1272 1273/* 1274 * PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages 1275 * back from an I/O mapping (ugh!). used in some MD code as well. 1276 */ 1277struct vm_page * 1278PHYS_TO_VM_PAGE(paddr_t pa) 1279{ 1280 paddr_t pf = atop(pa); 1281 int off; 1282 int psi; 1283 1284 psi = vm_physseg_find(pf, &off); 1285 1286 return ((psi == -1) ? 

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);

	return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
}
#endif /* VM_PHYSSEG_MAX > 1 */

/*
 * uvm_pagelookup: look up a page
 *
 * => caller should lock object to keep someone from pulling the page
 *	out from under it
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
	struct vm_page	find;

	find.offset = off;
	return (RB_FIND(uobj_pgs, &obj->memt, &find));
}

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */
void
uvm_pagewire(struct vm_page *pg)
{
	if (pg->wire_count == 0) {
		if (pg->pg_flags & PQ_ACTIVE) {
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active--;
		}
		if (pg->pg_flags & PQ_INACTIVE) {
			if (pg->pg_flags & PQ_SWAPBACKED)
				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
			else
				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
			uvmexp.inactive--;
		}
		uvmexp.wired++;
	}
	pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */
void
uvm_pageunwire(struct vm_page *pg)
{
	pg->wire_count--;
	if (pg->wire_count == 0) {
		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
		uvmexp.active++;
		atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.wired--;
	}
}
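
/*
 * Illustrative pairing sketch (hypothetical caller): code that must keep
 * a page resident across an operation wires it under the page queue lock
 * and unwires it afterwards, at which point the page goes back on the
 * active queue:
 *
 *	uvm_lock_pageq();
 *	uvm_pagewire(pg);
 *	uvm_unlock_pageq();
 *	... do the work that needs the page resident ...
 *	uvm_lock_pageq();
 *	uvm_pageunwire(pg);
 *	uvm_unlock_pageq();
 */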

/*
 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 */
void
uvm_pagedeactivate(struct vm_page *pg)
{
	pmap_page_protect(pg, VM_PROT_NONE);

	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if ((pg->pg_flags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
		atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive++;
		pmap_clear_reference(pg);
		/*
		 * update the "clean" bit.  this isn't 100%
		 * accurate, and doesn't have to be.  we'll
		 * re-sync it after we zap all mappings when
		 * scanning the inactive list.
		 */
		if ((pg->pg_flags & PG_CLEAN) != 0 &&
		    pmap_is_modified(pg))
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	}
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */
void
uvm_pageactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}
	if (pg->wire_count == 0) {

		/*
		 * if page is already active, remove it from list so we
		 * can put it at tail.  if it wasn't active, then mark
		 * it active and bump active count
		 */
		if (pg->pg_flags & PQ_ACTIVE)
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		else {
			atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active++;
		}

		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
	}
}

/*
 * uvm_pagezero: zero fill a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagezero(struct vm_page *pg)
{
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	pmap_zero_page(pg);
}

/*
 * uvm_pagecopy: copy a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{
	atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
	pmap_copy_page(src, dst);
}

/*
 * uvm_page_lookup_freelist: look up the free list for the specified page
 */
int
uvm_page_lookup_freelist(struct vm_page *pg)
{
#if VM_PHYSSEG_MAX == 1
	return (vm_physmem[0].free_list);
#else
	int lcv;

	lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
	KASSERT(lcv != -1);
	return (vm_physmem[lcv].free_list);
#endif
}