uvm_page.c revision 1.124
/*	$OpenBSD: uvm_page.c,v 1.124 2013/05/30 15:17:59 tedu Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c	8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * for object trees
 */
RB_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);

int
uvm_pagecmp(struct vm_page *a, struct vm_page *b)
{
	return (a->offset < b->offset ? -1 : a->offset > b->offset);
}

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t virtual_space_start;
static vaddr_t virtual_space_end;

/*
 * local prototypes
 */

static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object
 *
 * => caller must lock object
 * => caller must lock page queues XXX questionable
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(struct vm_page *pg)
{
	struct vm_page	*dupe;

	KASSERT((pg->pg_flags & PG_TABLED) == 0);
	dupe = RB_INSERT(uvm_objtree, &pg->uobject->memt, pg);
	/* not allowed to insert over another page */
	KASSERT(dupe == NULL);
	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(struct vm_page *pg)
{

	KASSERT(pg->pg_flags & PG_TABLED);
	RB_REMOVE(uvm_objtree, &pg->uobject->memt, pg);

	atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->pg_version++;
}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray, curpg;
	int lcv, i;
	paddr_t paddr, pgno;
	struct vm_physseg *seg;

	/*
	 * init the page queues and page queue locks
	 */

	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	mtx_init(&uvm.fpageqlock, IPL_VM);
	uvm_pmr_init();

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.  our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
		freepages += (seg->end - seg->start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
		n = seg->end - seg->start;
		if (n > pagecount) {
			panic("uvm_page_init: lost %ld page(s) in init",
			    (long)(n - pagecount));
			/* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		seg->pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		seg->lastpg = seg->pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		pgno = seg->start;
		paddr = ptoa(pgno);
		for (i = 0, curpg = seg->pgs; i < n;
		    i++, curpg++, pgno++, paddr += PAGE_SIZE) {
			curpg->phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(curpg);
#endif
			if (pgno >= seg->avail_start &&
			    pgno <= seg->avail_end) {
				uvmexp.npages++;
			}
		}

		/*
		 * Add pages to free pool.
		 */
		uvm_pmr_freepages(&seg->pgs[seg->avail_start - seg->start],
		    seg->avail_end - seg->avail_start);
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init locks for kernel threads
	 */
	mtx_init(&uvm.aiodoned_lock, IPL_BIO);

	/*
	 * init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}
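
/*
 * Illustrative note (added, not part of the original source): the pagecount
 * formula in uvm_page_init() above sizes the vm_page array so that the
 * managed pages plus the array describing them fit in the registered RAM,
 * roughly solving
 *
 *	pagecount * (PAGE_SIZE + sizeof(struct vm_page)) <=
 *	    (freepages + 1) * PAGE_SIZE
 *
 * Assuming, say, 4 KB pages and a vm_page structure of roughly 128 bytes,
 * pagecount comes out near freepages * 4096 / 4224, i.e. about 3% of the
 * registered memory is set aside for the page array itself.
 */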

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize(void)
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}
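
/*
 * Illustrative note (added, not part of the original source): within this
 * file, uvm_page_init() above is the main consumer of uvm_pageboot_alloc();
 * for example, the vm_page array itself is carved out with
 *
 *	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
 *	    sizeof(struct vm_page));
 *
 * Memory obtained this way is either handed out by pmap_steal_memory() or
 * mapped with pmap_kenter_pa(); it is no longer managed and is never
 * returned to the free page pool.
 */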

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

boolean_t
uvm_page_physget(paddr_t *paddrp)
{
	int lcv;
	struct vm_physseg *seg;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
	    lcv--, seg--)
#else
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
#endif
	{
		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		/* try from front */
		if (seg->avail_start == seg->start &&
		    seg->avail_start < seg->avail_end) {
			*paddrp = ptoa(seg->avail_start);
			seg->avail_start++;
			seg->start++;
			/* nothing left?   nuke it */
			if (seg->avail_start == seg->end) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (; lcv < vm_nphysseg; lcv++, seg++)
					/* structure copy */
					seg[0] = seg[1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (seg->avail_end == seg->end &&
		    seg->avail_start < seg->avail_end) {
			*paddrp = ptoa(seg->avail_end - 1);
			seg->avail_end--;
			seg->end--;
			/* nothing left?   nuke it */
			if (seg->avail_end == seg->start) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (; lcv < vm_nphysseg ; lcv++, seg++)
					/* structure copy */
					seg[0] = seg[1];
			}
			return (TRUE);
		}
	}

	/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
	    lcv--, seg--)
#else
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
#endif
	{

		/* any room in this bank? */
		if (seg->avail_start >= seg->avail_end)
			continue;  /* nope */

		*paddrp = ptoa(seg->avail_start);
		seg->avail_start++;
		/* truncate! */
		seg->start = seg->avail_start;

		/* nothing left?   nuke it */
		if (seg->avail_start == seg->end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (; lcv < vm_nphysseg ; lcv++, seg++)
				/* structure copy */
				seg[0] = seg[1];
		}
		return (TRUE);
	}

	return (FALSE);		/* whoops! */
}

#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int flags)
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps, *seg;

#ifdef DIAGNOSTIC
	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (start >= end)
		panic("uvm_page_physload: start >= end");
#endif

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++) {
		if (seg->pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
		/*
		 * XXXCDC: need some sort of lockout for this case
		 * right now it is only used by devices so it should be alright.
		 */
		paddr_t paddr;

		npages = end - start;  /* # of pages */

		pgs = (struct vm_page *)uvm_km_zalloc(kernel_map,
		    npages * sizeof(*pgs));
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* init phys_addr and free pages, XXX uvmexp.npages */
		for (lcv = 0, paddr = ptoa(start); lcv < npages;
		    lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&pgs[lcv]);
#endif
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end) {
				if (flags & PHYSLOAD_DEVICE) {
					atomic_setbits_int(&pgs[lcv].pg_flags,
					    PG_DEV);
					pgs[lcv].wire_count = 1;
				} else {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#endif
				}
			}
		}

		/*
		 * Add pages to free pool.
		 */
		if ((flags & PHYSLOAD_DEVICE) == 0) {
			uvm_pmr_freepages(&pgs[avail_start - start],
			    avail_end - avail_start);
		}

		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
			if (start < seg->start)
				break;
		ps = seg;
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
		    x--, seg--)
			/* structure copy */
			seg[1] = seg[0];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
			if ((end - start) >
			    (seg->end - seg->start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
		    x--, seg--)
			/* structure copy */
			seg[1] = seg[0];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	vm_nphysseg++;

	/*
	 * done!
	 */

	return;
}
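
/*
 * Illustrative example (added, not part of the original source): machine-
 * dependent bootstrap code registers its RAM segments with a call along
 * these lines, where every argument is a page frame number:
 *
 *	uvm_page_physload(atop(seg_start_pa), atop(seg_end_pa),
 *	    atop(avail_start_pa), atop(avail_end_pa), 0);
 *
 * The seg_*_pa/avail_*_pa names are hypothetical; a range that only covers
 * device memory would pass PHYSLOAD_DEVICE instead of 0.
 */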

#ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump(void); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump(void)
{
	int lcv;
	struct vm_physseg *seg;

	printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
		    (long long)seg->start,
		    (long long)seg->end,
		    (long long)seg->avail_start,
		    (long long)seg->avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
}
#endif

void
uvm_shutdown(void)
{
#ifdef UVM_SWAP_ENCRYPT
	uvm_swap_finicrypt_all();
#endif
}

/*
 * Perform insert of a given page in the specified anon of obj.
 * This is basically, uvm_pagealloc, but with the page already given.
 */
void
uvm_pagealloc_pg(struct vm_page *pg, struct uvm_object *obj, voff_t off,
    struct vm_anon *anon)
{
	int	flags;

	flags = PG_BUSY | PG_FAKE;
	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;

	if (anon) {
		anon->an_page = pg;
		flags |= PQ_ANON;
	} else if (obj)
		uvm_pageinsert(pg);
	atomic_setbits_int(&pg->pg_flags, flags);
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");
}

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed at the tail of rlist.  rlist is
 *    assumed to be properly initialized by caller.
 * => returns 0 on success or errno on failure
 * => doesn't take into account clean non-busy pages on inactive list
 *    that could be used(?)
 * => params:
 *	size		the size of the allocation, rounded to page size.
 *	low		the low address of the allowed allocation range.
 *	high		the high address of the allowed allocation range.
 *	alignment	memory must be aligned to this power-of-two boundary.
 *	boundary	no segment in the allocation may cross this
 *			power-of-two boundary (relative to zero).
 * => flags:
 *	UVM_PLA_NOWAIT	fail if allocation fails
 *	UVM_PLA_WAITOK	wait for memory to become avail
 *	UVM_PLA_ZERO	return zeroed memory
 */
int
uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist, int nsegs, int flags)
{
	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);
	KASSERT(!(flags & UVM_PLA_WAITOK) ^ !(flags & UVM_PLA_NOWAIT));

	if (size == 0)
		return (EINVAL);
	size = atop(round_page(size));

	/*
	 * check to see if we need to generate some free pages waking
	 * the pagedaemon.
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * XXX uvm_pglistalloc is currently only used for kernel
	 * objects. Unlike the checks in uvm_pagealloc, below, here
	 * we are always allowed to use the kernel reserve.  However, we
	 * have to enforce the pagedaemon reserve here or allocations
	 * via this path could consume everything and we can't
	 * recover in the page daemon.
	 */
again:
	if ((uvmexp.free <= uvmexp.reserve_pagedaemon + size &&
	    !((curproc == uvm.pagedaemon_proc) ||
	    (curproc == syncerproc)))) {
		if (flags & UVM_PLA_WAITOK) {
			uvm_wait("uvm_pglistalloc");
			goto again;
		}
		return (ENOMEM);
	}

	if ((high & PAGE_MASK) != PAGE_MASK) {
		printf("uvm_pglistalloc: Upper boundary 0x%lx "
		    "not on pagemask.\n", (unsigned long)high);
	}

	/*
	 * Our allocations are always page granularity, so our alignment
	 * must be, too.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	low = atop(roundup(low, alignment));
	/*
	 * high + 1 may result in overflow, in which case high becomes 0x0,
	 * which is the 'don't care' value.
	 * The only requirement in that case is that low is also 0x0, or the
	 * low<high assert will fail.
	 */
	high = atop(high + 1);
	alignment = atop(alignment);
	if (boundary < PAGE_SIZE && boundary != 0)
		boundary = PAGE_SIZE;
	boundary = atop(boundary);

	return uvm_pmr_getpages(size, low, high, alignment, boundary, nsegs,
	    flags, rlist);
}

/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */
void
uvm_pglistfree(struct pglist *list)
{
	uvm_pmr_freepageq(list);
}

/*
 * interface used by the buffer cache to allocate a buffer at a time.
 * The pages are allocated wired in DMA accessible memory
 */
void
uvm_pagealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
    int flags)
{
	struct pglist	 plist;
	struct vm_page	*pg;
	int		 i;


	TAILQ_INIT(&plist);
	(void) uvm_pglistalloc(size, dma_constraint.ucr_low,
	    dma_constraint.ucr_high, 0, 0, &plist, atop(round_page(size)),
	    UVM_PLA_WAITOK);
	i = 0;
	while ((pg = TAILQ_FIRST(&plist)) != NULL) {
		pg->wire_count = 1;
		atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
		KASSERT((pg->pg_flags & PG_DEV) == 0);
		TAILQ_REMOVE(&plist, pg, pageq);
		uvm_pagealloc_pg(pg, obj, off + ptoa(i++), NULL);
	}
}

/*
 * interface used by the buffer cache to reallocate a buffer at a time.
 * The pages are reallocated wired outside the DMA accessible region.
 *
 */
void
uvm_pagerealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
    int flags, struct uvm_constraint_range *where)
{
	struct pglist	 plist;
	struct vm_page	*pg, *tpg;
	int		 i;
	voff_t		 offset;


	TAILQ_INIT(&plist);
	if (size == 0)
		panic("size 0 uvm_pagerealloc");
	(void) uvm_pglistalloc(size, where->ucr_low, where->ucr_high, 0,
	    0, &plist, atop(round_page(size)), UVM_PLA_WAITOK);
	i = 0;
	while ((pg = TAILQ_FIRST(&plist)) != NULL) {
		offset = off + ptoa(i++);
		tpg = uvm_pagelookup(obj, offset);
		pg->wire_count = 1;
		atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
		KASSERT((pg->pg_flags & PG_DEV) == 0);
		TAILQ_REMOVE(&plist, pg, pageq);
		uvm_pagecopy(tpg, pg);
		uvm_pagefree(tpg);
		uvm_pagealloc_pg(pg, obj, offset, NULL);
	}
}
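
/*
 * Illustrative sketch (added, not part of the original source): a typical
 * physically constrained allocation through uvm_pglistalloc(), following
 * the pattern of uvm_pagealloc_multi() above.  "npages" and the error
 * handling are hypothetical:
 *
 *	struct pglist plist;
 *	struct vm_page *pg;
 *
 *	TAILQ_INIT(&plist);
 *	if (uvm_pglistalloc(ptoa(npages), dma_constraint.ucr_low,
 *	    dma_constraint.ucr_high, 0, 0, &plist, npages,
 *	    UVM_PLA_WAITOK | UVM_PLA_ZERO) != 0)
 *		return;
 *	TAILQ_FOREACH(pg, &plist, pageq)
 *		pg->wire_count = 1;	(caller wires/inserts pages as needed)
 */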

/*
 * uvm_pagealloc: allocate vm_page from a free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in tree)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 */

struct vm_page *
uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags)
{
	struct vm_page *pg;
	struct pglist pgl;
	int pmr_flags;
	boolean_t use_reserve;

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));

	/*
	 * check to see if we need to generate some free pages waking
	 * the pagedaemon.
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	    !((curproc == uvm.pagedaemon_proc) ||
	    (curproc == syncerproc))))
		goto fail;

	pmr_flags = UVM_PLA_NOWAIT;
	if (flags & UVM_PGA_ZERO)
		pmr_flags |= UVM_PLA_ZERO;
	TAILQ_INIT(&pgl);
	if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0)
		goto fail;

	pg = TAILQ_FIRST(&pgl);
	KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL);

	uvm_pagealloc_pg(pg, obj, off, anon);
	KASSERT((pg->pg_flags & PG_DEV) == 0);
	if (flags & UVM_PGA_ZERO)
		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	else
		atomic_setbits_int(&pg->pg_flags, PG_CLEAN);

	return(pg);

fail:
	return (NULL);
}

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
{

	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->pg_version++;
		uvm_pageinsert(pg);
	}
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(struct vm_page *pg)
{
	int saved_loan_count = pg->loan_count;
	u_int flags_to_clear = 0;

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p", pg);
	}
#endif

	KASSERT((pg->pg_flags & PG_DEV) == 0);

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->pg_flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.  the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		/* in case an anon takes over */
		if (saved_loan_count)
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */

		if (saved_loan_count)
			return;
	} else if (saved_loan_count && pg->uanon) {
		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		return;
	}
	KASSERT(saved_loan_count == 0);

	/*
	 * now remove the page from the queues
	 */

	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		flags_to_clear |= PQ_ACTIVE;
		uvmexp.active--;
	}
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		flags_to_clear |= PQ_INACTIVE;
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
	if (pg->uanon) {
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		flags_to_clear |= PQ_ANON;
	}

	/*
	 * Clean page state bits.
	 */
	flags_to_clear |= PQ_AOBJ; /* XXX: find culprit */
	flags_to_clear |= PQ_ENCRYPT|PG_ZERO|PG_FAKE|PG_BUSY|PG_RELEASED|
	    PG_CLEAN|PG_CLEANCHK;
	atomic_clearbits_int(&pg->pg_flags, flags_to_clear);

	/*
	 * and put on free queue
	 */

#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif

	uvm_pmr_freepages(pg, 1);

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;
}

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
 */

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int i;

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->pg_flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->pg_flags & PG_RELEASED) {
			uobj = pg->uobject;
			if (uobj != NULL) {
				uvm_lock_pageq();
				pmap_page_protect(pg, VM_PROT_NONE);
				/* XXX won't happen right now */
				if (pg->pg_flags & PQ_AOBJ)
					uao_dropswap(uobj,
					    pg->offset >> PAGE_SHIFT);
				uvm_pagefree(pg);
				uvm_unlock_pageq();
			} else {
				atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				uvm_anfree(pg->uanon);
			}
		} else {
			atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(struct vm_page *pg, char *tag)
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => we do at least one iteration per call, if we are below the target.
 * => we loop until we either reach the target or whichqs indicates that
 *	there is a process ready to run.
 */
void
uvm_pageidlezero(void)
{
#if 0 /* disabled: need new code */
	struct vm_page *pg;
	struct pgfreelist *pgfl;
	int free_list;

	do {
		uvm_lock_fpageq();

		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			pgfl = &uvm.page_free[free_list];
			if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
			    PGFL_UNKNOWN])) != NULL)
				break;
		}

		if (pg == NULL) {
			/*
			 * No non-zero'd pages; don't bother trying again
			 * until we know we have non-zero'd pages free.
			 */
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
		uvmexp.free--;
		uvm_unlock_fpageq();

#ifdef PMAP_PAGEIDLEZERO
		if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
			/*
			 * The machine-dependent code detected some
			 * reason for us to abort zeroing pages,
			 * probably because there is a process now
			 * ready to run.
			 */
			uvm_lock_fpageq();
			TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
			    pg, pageq);
			uvmexp.free++;
			uvmexp.zeroaborts++;
			uvm_unlock_fpageq();
			return;
		}
#else
		/*
		 * XXX This will toast the cache unless the pmap_zero_page()
		 * XXX implementation does uncached access.
		 */
		pmap_zero_page(pg);
#endif
		atomic_setbits_int(&pg->pg_flags, PG_ZERO);

		uvm_lock_fpageq();
		TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
		uvmexp.free++;
		uvmexp.zeropages++;
		uvm_unlock_fpageq();
	} while (curcpu_is_idle());
#endif /* 0 */
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX > 1
/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
int
vm_physseg_find(paddr_t pframe, int *offp)
{
	struct vm_physseg *seg;

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	/* binary search for it */
	int	start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */
		seg = vm_physmem + try;

		/* start past our try? */
		if (pframe >= seg->start) {
			/* was try correct? */
			if (pframe < seg->end) {
				if (offp)
					*offp = pframe - seg->start;
				return(try);		/* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);

#else
	/* linear search for it */
	int	lcv;

	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
		if (pframe >= seg->start && pframe < seg->end) {
			if (offp)
				*offp = pframe - seg->start;
			return(lcv);		/* got it */
		}
	}
	return(-1);

#endif
}

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);

	return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
}
#endif /* VM_PHYSSEG_MAX > 1 */
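
/*
 * Worked example (added, not part of the original source): with three
 * hypothetical segments covering page frames [0,100), [100,300) and
 * [300,400), looking up pframe 250 with vm_physseg_find() above lands in
 * segment 1 (250 >= 100 && 250 < 300), so it returns 1 with *offp = 150,
 * and PHYS_TO_VM_PAGE(ptoa(250)) yields &vm_physmem[1].pgs[150].
 */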

/*
 * uvm_pagelookup: look up a page
 *
 * => caller should lock object to keep someone from pulling the page
 *	out from under it
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
	/* XXX if stack is too much, handroll */
	struct vm_page pg;

	pg.offset = off;
	return (RB_FIND(uvm_objtree, &obj->memt, &pg));
}

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */
void
uvm_pagewire(struct vm_page *pg)
{
	if (pg->wire_count == 0) {
		if (pg->pg_flags & PQ_ACTIVE) {
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active--;
		}
		if (pg->pg_flags & PQ_INACTIVE) {
			if (pg->pg_flags & PQ_SWAPBACKED)
				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
			else
				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
			uvmexp.inactive--;
		}
		uvmexp.wired++;
	}
	pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */
void
uvm_pageunwire(struct vm_page *pg)
{
	pg->wire_count--;
	if (pg->wire_count == 0) {
		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
		uvmexp.active++;
		atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.wired--;
	}
}

/*
 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 */
void
uvm_pagedeactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if ((pg->pg_flags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
		atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive++;
		pmap_clear_reference(pg);
		/*
		 * update the "clean" bit.  this isn't 100%
		 * accurate, and doesn't have to be.  we'll
		 * re-sync it after we zap all mappings when
		 * scanning the inactive list.
		 */
		if ((pg->pg_flags & PG_CLEAN) != 0 &&
		    pmap_is_modified(pg))
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	}
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */
void
uvm_pageactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}
	if (pg->wire_count == 0) {

		/*
		 * if page is already active, remove it from list so we
		 * can put it at tail.  if it wasn't active, then mark
		 * it active and bump active count
		 */
		if (pg->pg_flags & PQ_ACTIVE)
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		else {
			atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active++;
		}

		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
	}
}

/*
 * uvm_pagezero: zero fill a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagezero(struct vm_page *pg)
{
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	pmap_zero_page(pg);
}

/*
 * uvm_pagecopy: copy a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{
	atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
	pmap_copy_page(src, dst);
}

/*
 * uvm_pagecount: count the number of physical pages in the address range.
 */
psize_t
uvm_pagecount(struct uvm_constraint_range *constraint)
{
	int lcv;
	psize_t sz;
	paddr_t low, high;
	paddr_t ps_low, ps_high;

	/* Algorithm uses page numbers. */
	low = atop(constraint->ucr_low);
	high = atop(constraint->ucr_high);

	sz = 0;
	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		ps_low = MAX(low, vm_physmem[lcv].avail_start);
		ps_high = MIN(high, vm_physmem[lcv].avail_end);
		if (ps_low < ps_high)
			sz += ps_high - ps_low;
	}
	return sz;
}