/*	$OpenBSD: uvm_page.c,v 1.103 2011/04/02 12:38:37 ariane Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * for object trees
 */
RB_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);

int
uvm_pagecmp(struct vm_page *a, struct vm_page *b)
{
	return (a->offset < b->offset ? -1 : a->offset > b->offset);
}

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t	virtual_space_start;
static vaddr_t	virtual_space_end;

/*
 * History
 */
UVMHIST_DECL(pghist);

/*
 * local prototypes
 */

static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object
 *
 * => caller must lock object
 * => caller must lock page queues XXX questionable
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(struct vm_page *pg)
{
	UVMHIST_FUNC("uvm_pageinsert"); UVMHIST_CALLED(pghist);

	KASSERT((pg->pg_flags & PG_TABLED) == 0);
	/* XXX should we check duplicates? */
	RB_INSERT(uvm_objtree, &pg->uobject->memt, pg);
	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(struct vm_page *pg)
{
	UVMHIST_FUNC("uvm_pageremove"); UVMHIST_CALLED(pghist);

	KASSERT(pg->pg_flags & PG_TABLED);
	RB_REMOVE(uvm_objtree, &pg->uobject->memt, pg);

	atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->pg_version++;
}
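
/*
 * Editor's note: a usage sketch for the pair above (hypothetical caller,
 * not code from this file).  with the object "uobj" locked and "off"
 * page-aligned:
 *
 *	pg->offset = off;
 *	pg->uobject = uobj;
 *	uvm_pageinsert(pg);	(links pg into uobj->memt, sets PG_TABLED)
 *	...
 *	uvm_pageremove(pg);	(unlinks pg, clears PG_TABLED, bumps pg_version)
 *
 * uvm_pagecmp() keys the tree on pg->offset alone, so an inserted page's
 * offset must stay fixed while it is PG_TABLED.
 */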

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray;
	int lcv, i;
	paddr_t paddr;
#if defined(UVMHIST)
	static struct uvm_history_ent pghistbuf[100];
#endif

	UVMHIST_FUNC("uvm_page_init");
	UVMHIST_INIT_STATIC(pghist, pghistbuf);
	UVMHIST_CALLED(pghist);

	/*
	 * init the page queues and page queue locks
	 */

	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	mtx_init(&uvm.fpageqlock, IPL_VM);
	uvm_pmr_init();

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.  our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_init: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.  for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.  we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));
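
	/*
	 * Editor's worked example of the formula above (hypothetical
	 * sizes: 4KB pages, 96-byte struct vm_page): with freepages =
	 * 262144 (1GB of RAM),
	 *
	 *	pagecount = (262145 << 12) / (4096 + 96) == 256141
	 *
	 * i.e. roughly 2.3% of the raw pages end up holding the vm_page
	 * array itself rather than backing allocatable memory.
	 */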

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			panic("uvm_page_init: lost %ld page(s) in init",
			    (long)(n - pagecount));
			/* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
#endif
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
			}
		}

		/*
		 * Add pages to free pool.
		 */
		uvm_pmr_freepages(&vm_physmem[lcv].pgs[
		    vm_physmem[lcv].avail_start - vm_physmem[lcv].start],
		    vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init locks for kernel threads
	 */
	mtx_init(&uvm.aiodoned_lock, IPL_BIO);

	/*
	 * init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize(void)
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}
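
/*
 * Editor's note on the power-of-two test in uvm_setpagesize(): when
 * pagesize == 2^n, pagemask == 2^n - 1 only has bits below bit n set,
 * so (pagemask & pagesize) == 0.  worked example (hypothetical sizes):
 *
 *	0x1000 & 0x0fff == 0		(4096: a power of two, accepted)
 *	0x1800 & 0x17ff == 0x1000	(6144: not a power of two, panic)
 */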

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif /* PMAP_STEAL_MEMORY */
}
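
/*
 * Editor's summary of the !PMAP_STEAL_MEMORY path above (a sketch of the
 * control flow, not additional code): for each page of the rounded-up
 * request we
 *
 *	1. carve virtual space from [virtual_space_start, virtual_space_end)
 *	2. take a physical page with uvm_page_physget()
 *	3. enter a wired mapping with pmap_kenter_pa()
 *
 * memory handed out this way is permanently stolen: it is never described
 * by vm_page structures and can never be freed back.
 */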

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.  if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
static boolean_t uvm_page_physget_freelist(paddr_t *, int);

static boolean_t
uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
{
	int lcv, x;
	UVMHIST_FUNC("uvm_page_physget_freelist"); UVMHIST_CALLED(pghist);

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		if (vm_physmem[lcv].free_list != freelist)
			continue;

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass 2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}

boolean_t
uvm_page_physget(paddr_t *paddrp)
{
	int i;
	UVMHIST_FUNC("uvm_page_physget"); UVMHIST_CALLED(pghist);

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == TRUE)
			return (TRUE);
	return (FALSE);
}
#endif /* !PMAP_STEAL_MEMORY */
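
/*
 * Editor's worked example for the allocator above (hypothetical segment,
 * in page frames): a segment with start == avail_start == 0x100 and
 * end == avail_end == 0x200 passes the pass 1 "try from front" test, so
 * the call returns ptoa(0x100) and shrinks the segment to
 * start == avail_start == 0x101.  only when no segment has a matching
 * end does pass 2 allocate from a segment's interior, discarding the
 * page frames below the new start.
 */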

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload_flags(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int free_list, int flags)
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);

	if (start >= end)
		panic("uvm_page_physload: start >= end");

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to allocate vm_page structures
	 */
	if (!preload) {
		/*
		 * XXXCDC: need some sort of lockout for this case
		 * right now it is only used by devices so it should be alright.
		 */
		paddr_t paddr;

		npages = end - start;  /* # of pages */

		pgs = (struct vm_page *)uvm_km_zalloc(kernel_map,
		    npages * sizeof(*pgs));
		if (pgs == NULL) {
			printf("uvm_page_physload: cannot allocate vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%llx -> 0x%llx\n",
			    (long long)start, (long long)end);
			return;
		}
		/* init phys_addr and free pages, XXX uvmexp.npages */
		for (lcv = 0, paddr = ptoa(start); lcv < npages;
		    lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&pgs[lcv]);
#endif
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end) {
				if (flags & PHYSLOAD_DEVICE) {
					atomic_setbits_int(&pgs[lcv].pg_flags,
					    PG_DEV);
					pgs[lcv].wire_count = 1;
				} else {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#endif
				}
			}
		}

		/*
		 * Add pages to free pool.
		 */
		if ((flags & PHYSLOAD_DEVICE) == 0) {
			uvm_pmr_freepages(&pgs[avail_start - start],
			    avail_end - avail_start);
		}

		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	return;
}
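
/*
 * Editor's sketch of a (hypothetical) MD bootstrap call: registering RAM
 * from 1MB to 16MB with the segment's first page kept back, on the
 * default freelist:
 *
 *	uvm_page_physload_flags(atop(0x100000), atop(0x1000000),
 *	    atop(0x101000), atop(0x1000000), VM_FREELIST_DEFAULT, 0);
 *
 * issued before uvm_page_init() this is a "preload": only the segment
 * bounds are recorded, and the vm_page array is allocated later by
 * uvm_page_init().
 */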

#ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump(void); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump(void)
{
	int lcv;

	printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
		    (long long)vm_physmem[lcv].start,
		    (long long)vm_physmem[lcv].end,
		    (long long)vm_physmem[lcv].avail_start,
		    (long long)vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
}
#endif

void
uvm_shutdown(void)
{
#ifdef UVM_SWAP_ENCRYPT
	uvm_swap_finicrypt_all();
#endif
}

/*
 * uvm_pagealloc_pg: insert a given page into the specified anon or object.
 * This is basically uvm_pagealloc, but with the page already given.
 */
void
uvm_pagealloc_pg(struct vm_page *pg, struct uvm_object *obj, voff_t off,
    struct vm_anon *anon)
{
	int	flags;

	flags = PG_BUSY | PG_FAKE;
	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;

	if (anon) {
		anon->an_page = pg;
		flags |= PQ_ANON;
	} else if (obj)
		uvm_pageinsert(pg);
	atomic_setbits_int(&pg->pg_flags, flags);
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");
}

/*
 * uvm_pagealloc: allocate a vm_page.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in tree)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 */

struct vm_page *
uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags)
{
	struct vm_page *pg;
	struct pglist pgl;
	int pmr_flags;
	boolean_t use_reserve;
	UVMHIST_FUNC("uvm_pagealloc"); UVMHIST_CALLED(pghist);

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));

	/*
	 * check to see if we need to wake the pagedaemon so it can
	 * generate some free pages.
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	    !((curproc == uvm.pagedaemon_proc) ||
	    (curproc == syncerproc))))
		goto fail;

	pmr_flags = UVM_PLA_NOWAIT;
	if (flags & UVM_PGA_ZERO)
		pmr_flags |= UVM_PLA_ZERO;
	TAILQ_INIT(&pgl);
	if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0)
		goto fail;

	pg = TAILQ_FIRST(&pgl);
	KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL);

	uvm_pagealloc_pg(pg, obj, off, anon);
	KASSERT((pg->pg_flags & PG_DEV) == 0);
	atomic_setbits_int(&pg->pg_flags, PG_BUSY|PG_CLEAN|PG_FAKE);
	if (flags & UVM_PGA_ZERO)
		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);

	UVMHIST_LOG(pghist, "allocated pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);
	return(pg);

fail:
	UVMHIST_LOG(pghist, "failed!", 0, 0, 0, 0);
	return (NULL);
}
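
/*
 * Editor's sketch of a typical (hypothetical) uvm_pagealloc() caller:
 * grabbing an anonymous zero-filled page and tolerating failure:
 *
 *	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
 *	if (pg == NULL)
 *		uvm_wait("example");	(sleep for memory, then retry)
 *
 * with obj == NULL and anon == NULL the page has no identity; the caller
 * is expected to give it one or free it.
 */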

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
{

	UVMHIST_FUNC("uvm_pagerealloc"); UVMHIST_CALLED(pghist);

	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->pg_version++;
		uvm_pageinsert(pg);
	}
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(struct vm_page *pg)
{
	int saved_loan_count = pg->loan_count;
	UVMHIST_FUNC("uvm_pagefree"); UVMHIST_CALLED(pghist);

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p", pg);
	}
#endif

	UVMHIST_LOG(pghist, "freeing pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);
	KASSERT((pg->pg_flags & PG_DEV) == 0);

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->pg_flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.  the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		/* in case an anon takes over */
		if (saved_loan_count)
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */

		if (saved_loan_count)
			return;
	} else if (saved_loan_count && pg->uanon) {
		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		return;
	}
	KASSERT(saved_loan_count == 0);

	/*
	 * now remove the page from the queues
	 */

	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
	if (pg->uanon) {
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
	}

	/*
	 * Clean page state bits.
	 */
	atomic_clearbits_int(&pg->pg_flags, PQ_AOBJ); /* XXX: find culprit */
	atomic_clearbits_int(&pg->pg_flags, PQ_ENCRYPT|
	    PG_ZERO|PG_FAKE|PG_BUSY|PG_RELEASED|PG_CLEAN|PG_CLEANCHK);

	/*
	 * and put on free queue
	 */

#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif

	uvm_pmr_freepages(pg, 1);

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;
}

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
 */

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int i;
	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(pdhist);

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->pg_flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->pg_flags & PG_RELEASED) {
			UVMHIST_LOG(pdhist, "releasing pg %p", pg,0,0,0);
			uobj = pg->uobject;
			if (uobj != NULL) {
				uvm_lock_pageq();
				pmap_page_protect(pg, VM_PROT_NONE);
				/* XXX won't happen right now */
				if (pg->pg_flags & PQ_ANON)
					uao_dropswap(uobj,
					    pg->offset >> PAGE_SHIFT);
				uvm_pagefree(pg);
				uvm_unlock_pageq();
			} else {
				atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				uvm_anfree(pg->uanon);
			}
		} else {
			UVMHIST_LOG(pdhist, "unbusying pg %p", pg,0,0,0);
			atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(struct vm_page *pg, char *tag)
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => we do at least one iteration per call, if we are below the target.
 * => we loop until we either reach the target or whichqs indicates that
 *	there is a process ready to run.
 */
void
uvm_pageidlezero(void)
{
#if 0 /* disabled: need new code */
	struct vm_page *pg;
	struct pgfreelist *pgfl;
	int free_list;
	UVMHIST_FUNC("uvm_pageidlezero"); UVMHIST_CALLED(pghist);

	do {
		uvm_lock_fpageq();

		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			pgfl = &uvm.page_free[free_list];
			if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
			    PGFL_UNKNOWN])) != NULL)
				break;
		}

		if (pg == NULL) {
			/*
			 * No non-zero'd pages; don't bother trying again
			 * until we know we have non-zero'd pages free.
			 */
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
		uvmexp.free--;
		uvm_unlock_fpageq();

#ifdef PMAP_PAGEIDLEZERO
		if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
			/*
			 * The machine-dependent code detected some
			 * reason for us to abort zeroing pages,
			 * probably because there is a process now
			 * ready to run.
			 */
			uvm_lock_fpageq();
			TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
			    pg, pageq);
			uvmexp.free++;
			uvmexp.zeroaborts++;
			uvm_unlock_fpageq();
			return;
		}
#else
		/*
		 * XXX This will toast the cache unless the pmap_zero_page()
		 * XXX implementation does uncached access.
		 */
		pmap_zero_page(pg);
#endif
		atomic_setbits_int(&pg->pg_flags, PG_ZERO);

		uvm_lock_fpageq();
		TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
		uvmexp.free++;
		uvmexp.zeropages++;
		uvm_unlock_fpageq();
	} while (curcpu_is_idle());
#endif /* 0 */
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX > 1
/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
int
vm_physseg_find(paddr_t pframe, int *offp)
{

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	/* binary search for it */
	int	start, len, try;

	/*
	 * if the try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= vm_physmem[try].start) {
			/* was try correct? */
			if (pframe < vm_physmem[try].end) {
				if (offp)
					*offp = pframe - vm_physmem[try].start;
				return(try);		/* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);

#else
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		if (pframe >= vm_physmem[lcv].start &&
		    pframe < vm_physmem[lcv].end) {
			if (offp)
				*offp = pframe - vm_physmem[lcv].start;
			return(lcv);		/* got it */
		}
	}
	return(-1);

#endif
}

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);

	return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
}
#endif /* VM_PHYSSEG_MAX > 1 */
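
/*
 * Editor's worked example of the bsearch "adjust" above (hypothetical
 * segments, in page frames): with segments [0,100) and [100,200) and
 * pframe = 250 (not present):
 *
 *	start=0 len=2: try=1; 250 >= 100 but 250 >= 200,
 *	    so start=2, len-- (now 1), and the for loop halves len to 0
 *	loop ends -> return -1
 *
 * without the "adjust", len would still be 1 on the next round and the
 * loop would probe vm_physmem[2], one past the last valid segment.
 */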

/*
 * uvm_pagelookup: look up a page
 *
 * => caller should lock object to keep someone from pulling the page
 *	out from under it
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
	/* XXX if stack is too much, handroll */
	struct vm_page pg;

	pg.offset = off;
	return (RB_FIND(uvm_objtree, &obj->memt, &pg));
}

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */
void
uvm_pagewire(struct vm_page *pg)
{
	if (pg->wire_count == 0) {
		if (pg->pg_flags & PQ_ACTIVE) {
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active--;
		}
		if (pg->pg_flags & PQ_INACTIVE) {
			if (pg->pg_flags & PQ_SWAPBACKED)
				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
			else
				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
			uvmexp.inactive--;
		}
		uvmexp.wired++;
	}
	pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */
void
uvm_pageunwire(struct vm_page *pg)
{
	pg->wire_count--;
	if (pg->wire_count == 0) {
		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
		uvmexp.active++;
		atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.wired--;
	}
}

/*
 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 */
void
uvm_pagedeactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if ((pg->pg_flags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
		atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive++;
		pmap_clear_reference(pg);
		/*
		 * update the "clean" bit.  this isn't 100%
		 * accurate, and doesn't have to be.  we'll
		 * re-sync it after we zap all mappings when
		 * scanning the inactive list.
		 */
		if ((pg->pg_flags & PG_CLEAN) != 0 &&
		    pmap_is_modified(pg))
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	}
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */
void
uvm_pageactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}
	if (pg->wire_count == 0) {

		/*
		 * if page is already active, remove it from list so we
		 * can put it at tail.  if it wasn't active, then mark
		 * it active and bump active count
		 */
		if (pg->pg_flags & PQ_ACTIVE)
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		else {
			atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active++;
		}

		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
	}
}

/*
 * uvm_pagezero: zero fill a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagezero(struct vm_page *pg)
{
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	pmap_zero_page(pg);
}

/*
 * uvm_pagecopy: copy a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{
	atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
	pmap_copy_page(src, dst);
}

/*
 * uvm_page_lookup_freelist: look up the free list for the specified page
 */
int
uvm_page_lookup_freelist(struct vm_page *pg)
{
#if VM_PHYSSEG_MAX == 1
	return (vm_physmem[0].free_list);
#else
	int	lcv;

	lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
	KASSERT(lcv != -1);
	return (vm_physmem[lcv].free_list);
#endif
}
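
/*
 * Editor's note (a sketch of the queue transitions implemented above,
 * not code from this file): a typical page cycles
 *
 *	uvm_pagealloc() -> uvm_pageactivate() -> uvm_pagedeactivate()
 *	    -> reclaimed by the pagedaemon, or re-activated on reuse
 *
 * while uvm_pagewire()/uvm_pageunwire() take a page off the queues and
 * put it back, hiding it from the pagedaemon while wire_count > 0.
 */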

/*
 * uvm_pagecount: count the number of physical pages in the address range.
 */
psize_t
uvm_pagecount(struct uvm_constraint_range* constraint)
{
	int	lcv;
	psize_t sz;
	paddr_t low, high;
	paddr_t ps_low, ps_high;

	/* Algorithm uses page numbers. */
	low = atop(constraint->ucr_low);
	high = atop(constraint->ucr_high);

	sz = 0;
	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		ps_low = MAX(low, vm_physmem[lcv].avail_start);
		ps_high = MIN(high, vm_physmem[lcv].avail_end);
		if (ps_low < ps_high)
			sz += ps_high - ps_low;
	}
	return sz;
}
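
/*
 * Editor's sketch of a (hypothetical) uvm_pagecount() caller: counting
 * the pages reachable by a 24-bit ISA DMA engine:
 *
 *	struct uvm_constraint_range isa_range = { 0, 0x00ffffff };
 *	psize_t n = uvm_pagecount(&isa_range);
 *
 * note that only pages inside some segment's avail_start/avail_end
 * window contribute to the count.
 */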