uvm_page.c revision 1.119
/*	$OpenBSD: uvm_page.c,v 1.119 2013/03/12 20:47:16 beck Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * for object trees
 */
RB_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);

int
uvm_pagecmp(struct vm_page *a, struct vm_page *b)
{
	return (a->offset < b->offset ? -1 : a->offset > b->offset);
}

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t	virtual_space_start;
static vaddr_t	virtual_space_end;

/*
 * local prototypes
 */

static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object
 *
 * => caller must lock object
 * => caller must lock page queues XXX questionable
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(struct vm_page *pg)
{
	struct vm_page	*dupe;

	KASSERT((pg->pg_flags & PG_TABLED) == 0);
	dupe = RB_INSERT(uvm_objtree, &pg->uobject->memt, pg);
	/* not allowed to insert over another page */
	KASSERT(dupe == NULL);
	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(struct vm_page *pg)
{

	KASSERT(pg->pg_flags & PG_TABLED);
	RB_REMOVE(uvm_objtree, &pg->uobject->memt, pg);

	atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->pg_version++;
}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray, curpg;
	int lcv, i;
	paddr_t paddr, pgno;
	struct vm_physseg *seg;

	/*
	 * init the page queues and page queue locks
	 */

	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	mtx_init(&uvm.fpageqlock, IPL_VM);
	uvm_pmr_init();

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
		freepages += (seg->end - seg->start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
		n = seg->end - seg->start;
		if (n > pagecount) {
			panic("uvm_page_init: lost %ld page(s) in init",
			    (long)(n - pagecount));
			    /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		seg->pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		seg->lastpg = seg->pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		pgno = seg->start;
		paddr = ptoa(pgno);
		for (i = 0, curpg = seg->pgs; i < n;
		    i++, curpg++, pgno++, paddr += PAGE_SIZE) {
			curpg->phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(curpg);
#endif
			if (pgno >= seg->avail_start &&
			    pgno <= seg->avail_end) {
				uvmexp.npages++;
			}
		}

		/*
		 * Add pages to free pool.
		 */
		uvm_pmr_freepages(&seg->pgs[seg->avail_start - seg->start],
		    seg->avail_end - seg->avail_start);
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init locks for kernel threads
	 */
	mtx_init(&uvm.aiodoned_lock, IPL_BIO);

	/*
	 * init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}
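
/*
 * Example of the sizing arithmetic above (illustrative only; the exact
 * figures depend on the platform): assuming 4 KB pages and a vm_page
 * structure of roughly 128 bytes, 262144 registered page frames (1 GB
 * of RAM) give
 *
 *	pagecount = ((262144 + 1) * 4096) / (4096 + 128) ~= 254200
 *
 * i.e. roughly 8000 of the frames (about 31 MB) end up holding the
 * vm_page array that describes the remaining ~254200 usable pages.
 */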

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize(void)
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif /* PMAP_STEAL_MEMORY */
}

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */
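
/*
 * Illustrative example (segment values invented for the example): a
 * segment registered as start=0x1000, end=0x2000, avail_start=0x1000,
 * avail_end=0x2000 matches at the front, so the first call hands out
 * page frame 0x1000 and leaves start=avail_start=0x1001; repeated calls
 * keep shrinking the segment until it is empty and is removed from
 * vm_physmem[].
 */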

boolean_t
uvm_page_physget(paddr_t *paddrp)
{
	int lcv;
	struct vm_physseg *seg;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
	    lcv--, seg--)
#else
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
#endif
	{
		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		/* try from front */
		if (seg->avail_start == seg->start &&
		    seg->avail_start < seg->avail_end) {
			*paddrp = ptoa(seg->avail_start);
			seg->avail_start++;
			seg->start++;
			/* nothing left?   nuke it */
			if (seg->avail_start == seg->end) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (; lcv < vm_nphysseg; lcv++, seg++)
					/* structure copy */
					seg[0] = seg[1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (seg->avail_end == seg->end &&
		    seg->avail_start < seg->avail_end) {
			*paddrp = ptoa(seg->avail_end - 1);
			seg->avail_end--;
			seg->end--;
			/* nothing left?   nuke it */
			if (seg->avail_end == seg->start) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (; lcv < vm_nphysseg ; lcv++, seg++)
					/* structure copy */
					seg[0] = seg[1];
			}
			return (TRUE);
		}
	}

	/* pass 2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
	    lcv--, seg--)
#else
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
#endif
	{

		/* any room in this bank? */
		if (seg->avail_start >= seg->avail_end)
			continue;  /* nope */

		*paddrp = ptoa(seg->avail_start);
		seg->avail_start++;
		/* truncate! */
		seg->start = seg->avail_start;

		/* nothing left?   nuke it */
		if (seg->avail_start == seg->end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (; lcv < vm_nphysseg ; lcv++, seg++)
				/* structure copy */
				seg[0] = seg[1];
		}
		return (TRUE);
	}

	return (FALSE);	/* whoops! */
}

#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int flags)
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps, *seg;

#ifdef DIAGNOSTIC
	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (start >= end)
		panic("uvm_page_physload: start >= end");
#endif

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++) {
		if (seg->pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
		/*
		 * XXXCDC: need some sort of lockout for this case
		 * right now it is only used by devices so it should be alright.
		 */
		paddr_t paddr;

		npages = end - start;  /* # of pages */

		pgs = (struct vm_page *)uvm_km_zalloc(kernel_map,
		    npages * sizeof(*pgs));
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* init phys_addr and free pages, XXX uvmexp.npages */
		for (lcv = 0, paddr = ptoa(start); lcv < npages;
		    lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&pgs[lcv]);
#endif
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end) {
				if (flags & PHYSLOAD_DEVICE) {
					atomic_setbits_int(&pgs[lcv].pg_flags,
					    PG_DEV);
					pgs[lcv].wire_count = 1;
				} else {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#endif
				}
			}
		}

		/*
		 * Add pages to free pool.
		 */
		if ((flags & PHYSLOAD_DEVICE) == 0) {
			uvm_pmr_freepages(&pgs[avail_start - start],
			    avail_end - avail_start);
		}

		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
			if (start < seg->start)
				break;
		ps = seg;
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
		    x--, seg--)
			/* structure copy */
			seg[1] = seg[0];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
			if ((end - start) >
			    (seg->end - seg->start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
		    x--, seg--)
			/* structure copy */
			seg[1] = seg[0];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	vm_nphysseg++;

	/*
	 * done!
	 */

	return;
}
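
/*
 * Illustrative call (names invented for the example): machine-dependent
 * startup code registers a chunk of managed RAM roughly like
 *
 *	uvm_page_physload(atop(seg_start_pa), atop(seg_end_pa),
 *	    atop(avail_start_pa), atop(avail_end_pa), 0);
 *
 * where all arguments are page frame numbers.  Device memory would pass
 * PHYSLOAD_DEVICE instead of 0 so its pages are marked PG_DEV and are
 * never added to the free page pool.
 */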

#ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump(void); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump(void)
{
	int lcv;
	struct vm_physseg *seg;

	printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
		    (long long)seg->start,
		    (long long)seg->end,
		    (long long)seg->avail_start,
		    (long long)seg->avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
}
#endif

void
uvm_shutdown(void)
{
#ifdef UVM_SWAP_ENCRYPT
	uvm_swap_finicrypt_all();
#endif
}

/*
 * Perform insert of a given page in the specified anon of obj.
 * This is basically uvm_pagealloc, but with the page already given.
 */
void
uvm_pagealloc_pg(struct vm_page *pg, struct uvm_object *obj, voff_t off,
    struct vm_anon *anon)
{
	int	flags;

	flags = PG_BUSY | PG_FAKE;
	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;

	if (anon) {
		anon->an_page = pg;
		flags |= PQ_ANON;
	} else if (obj)
		uvm_pageinsert(pg);
	atomic_setbits_int(&pg->pg_flags, flags);
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");
}

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed at the tail of rlist.  rlist is
 *    assumed to be properly initialized by caller.
 * => returns 0 on success or errno on failure
 * => doesn't take into account clean non-busy pages on inactive list
 *    that could be used(?)
 * => params:
 *	size		the size of the allocation, rounded to page size.
 *	low		the low address of the allowed allocation range.
 *	high		the high address of the allowed allocation range.
 *	alignment	memory must be aligned to this power-of-two boundary.
 *	boundary	no segment in the allocation may cross this
 *			power-of-two boundary (relative to zero).
 * => flags:
 *	UVM_PLA_NOWAIT	fail if allocation fails
 *	UVM_PLA_WAITOK	wait for memory to become avail
 *	UVM_PLA_ZERO	return zeroed memory
 */
int
uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist, int nsegs, int flags)
{
	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);
	KASSERT(!(flags & UVM_PLA_WAITOK) ^ !(flags & UVM_PLA_NOWAIT));

	if (size == 0)
		return (EINVAL);
	size = atop(round_page(size));

	/*
	 * check to see if we need to generate some free pages waking
	 * the pagedaemon.
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * XXX uvm_pglistalloc is currently only used for kernel
	 * objects. Unlike the checks in uvm_pagealloc, below, here
	 * we are always allowed to use the kernel reserve.  However, we
	 * have to enforce the pagedaemon reserve here or allocations
	 * via this path could consume everything and we can't
	 * recover in the page daemon.
	 */
 again:
	if ((uvmexp.free <= uvmexp.reserve_pagedaemon + size &&
	    !((curproc == uvm.pagedaemon_proc) ||
	    (curproc == syncerproc)))) {
		if (flags & UVM_PLA_WAITOK) {
			uvm_wait("uvm_pglistalloc");
			goto again;
		}
		return (ENOMEM);
	}

	if ((high & PAGE_MASK) != PAGE_MASK) {
		printf("uvm_pglistalloc: Upper boundary 0x%lx "
		    "not on pagemask.\n", (unsigned long)high);
	}

	/*
	 * Our allocations are always page granularity, so our alignment
	 * must be, too.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	low = atop(roundup(low, alignment));
	/*
	 * high + 1 may result in overflow, in which case high becomes 0x0,
	 * which is the 'don't care' value.
	 * The only requirement in that case is that low is also 0x0, or the
	 * low<high assert will fail.
	 */
	high = atop(high + 1);
	alignment = atop(alignment);
	if (boundary < PAGE_SIZE && boundary != 0)
		boundary = PAGE_SIZE;
	boundary = atop(boundary);

	return uvm_pmr_getpages(size, low, high, alignment, boundary, nsegs,
	    flags, rlist);
}

/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */
void
uvm_pglistfree(struct pglist *list)
{
	uvm_pmr_freepageq(list);
}

/*
 * interface used by the buffer cache to allocate a buffer at a time.
 * The pages are allocated wired in DMA accessible memory
 */
void
uvm_pagealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
    int flags)
{
	struct pglist	 plist;
	struct vm_page	*pg;
	int		 i;


	TAILQ_INIT(&plist);
	(void) uvm_pglistalloc(size, dma_constraint.ucr_low,
	    dma_constraint.ucr_high, 0, 0, &plist, atop(round_page(size)),
	    UVM_PLA_WAITOK);
	i = 0;
	while ((pg = TAILQ_FIRST(&plist)) != NULL) {
		pg->wire_count = 1;
		atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
		KASSERT((pg->pg_flags & PG_DEV) == 0);
		TAILQ_REMOVE(&plist, pg, pageq);
		uvm_pagealloc_pg(pg, obj, off + ptoa(i++), NULL);
	}
}

/*
 * interface used by the buffer cache to reallocate a buffer at a time.
 * The pages are reallocated wired outside the DMA accessible region.
 *
 */
void
uvm_pagerealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
    int flags, struct uvm_constraint_range *where)
{
	struct pglist	 plist;
	struct vm_page	*pg, *tpg;
	int		 i;
	voff_t		 offset;


	TAILQ_INIT(&plist);
	if (size == 0)
		panic("size 0 uvm_pagerealloc");
	(void) uvm_pglistalloc(size, where->ucr_low, where->ucr_high, 0,
	    0, &plist, atop(round_page(size)), UVM_PLA_WAITOK);
	i = 0;
	while ((pg = TAILQ_FIRST(&plist)) != NULL) {
		offset = off + ptoa(i++);
		tpg = uvm_pagelookup(obj, offset);
		pg->wire_count = 1;
		atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
		KASSERT((pg->pg_flags & PG_DEV) == 0);
		TAILQ_REMOVE(&plist, pg, pageq);
		uvm_pagecopy(tpg, pg);
		uvm_pagefree(tpg);
		uvm_pagealloc_pg(pg, obj, offset, NULL);
	}
}
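
/*
 * Typical caller pattern for uvm_pagealloc() below (sketch only; the
 * object, offset and wait-channel name are invented for the example):
 *
 *	pg = uvm_pagealloc(uobj, cur_offset, NULL, UVM_PGA_ZERO);
 *	if (pg == NULL) {
 *		uvm_wait("examplepg");	(sleep until pages are freed)
 *		... retry ...
 *	}
 *
 * Callers that cannot sleep simply fail the operation instead of
 * calling uvm_wait().
 */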

/*
 * uvm_pagealloc: allocate a vm_page.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in tree)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 */

struct vm_page *
uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags)
{
	struct vm_page *pg;
	struct pglist pgl;
	int pmr_flags;
	boolean_t use_reserve;

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));

	/*
	 * check to see if we need to generate some free pages waking
	 * the pagedaemon.
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	    !((curproc == uvm.pagedaemon_proc) ||
	    (curproc == syncerproc))))
		goto fail;

	pmr_flags = UVM_PLA_NOWAIT;
	if (flags & UVM_PGA_ZERO)
		pmr_flags |= UVM_PLA_ZERO;
	TAILQ_INIT(&pgl);
	if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0)
		goto fail;

	pg = TAILQ_FIRST(&pgl);
	KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL);

	uvm_pagealloc_pg(pg, obj, off, anon);
	KASSERT((pg->pg_flags & PG_DEV) == 0);
	if (flags & UVM_PGA_ZERO)
		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	else
		atomic_setbits_int(&pg->pg_flags, PG_CLEAN);

	return(pg);

 fail:
	return (NULL);
}

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
{

	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->pg_version++;
		uvm_pageinsert(pg);
	}
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(struct vm_page *pg)
{
	int saved_loan_count = pg->loan_count;

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p", pg);
	}
#endif

	KASSERT((pg->pg_flags & PG_DEV) == 0);

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->pg_flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.   the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		/* in case an anon takes over */
		if (saved_loan_count)
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */

		if (saved_loan_count)
			return;
	} else if (saved_loan_count && pg->uanon) {
		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		return;
	}
	KASSERT(saved_loan_count == 0);

	/*
	 * now remove the page from the queues
	 */

	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
	if (pg->uanon) {
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
	}

	/*
	 * Clean page state bits.
	 */
	atomic_clearbits_int(&pg->pg_flags, PQ_AOBJ); /* XXX: find culprit */
	atomic_clearbits_int(&pg->pg_flags, PQ_ENCRYPT|
	    PG_ZERO|PG_FAKE|PG_BUSY|PG_RELEASED|PG_CLEAN|PG_CLEANCHK);

	/*
	 * and put on free queue
	 */

#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif

	uvm_pmr_freepages(pg, 1);

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;
}

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
 */

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int i;

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->pg_flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->pg_flags & PG_RELEASED) {
			uobj = pg->uobject;
			if (uobj != NULL) {
				uvm_lock_pageq();
				pmap_page_protect(pg, VM_PROT_NONE);
				/* XXX won't happen right now */
				if (pg->pg_flags & PQ_AOBJ)
					uao_dropswap(uobj,
					    pg->offset >> PAGE_SHIFT);
				uvm_pagefree(pg);
				uvm_unlock_pageq();
			} else {
				atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				uvm_anfree(pg->uanon);
			}
		} else {
			atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(struct vm_page *pg, char *tag)
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => we do at least one iteration per call, if we are below the target.
 * => we loop until we either reach the target or whichqs indicates that
 *	there is a process ready to run.
 */
void
uvm_pageidlezero(void)
{
#if 0 /* disabled: need new code */
	struct vm_page *pg;
	struct pgfreelist *pgfl;
	int free_list;

	do {
		uvm_lock_fpageq();

		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			pgfl = &uvm.page_free[free_list];
			if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
			    PGFL_UNKNOWN])) != NULL)
				break;
		}

		if (pg == NULL) {
			/*
			 * No non-zero'd pages; don't bother trying again
			 * until we know we have non-zero'd pages free.
			 */
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
		uvmexp.free--;
		uvm_unlock_fpageq();

#ifdef PMAP_PAGEIDLEZERO
		if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
			/*
			 * The machine-dependent code detected some
			 * reason for us to abort zeroing pages,
			 * probably because there is a process now
			 * ready to run.
			 */
			uvm_lock_fpageq();
			TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
			    pg, pageq);
			uvmexp.free++;
			uvmexp.zeroaborts++;
			uvm_unlock_fpageq();
			return;
		}
#else
		/*
		 * XXX This will toast the cache unless the pmap_zero_page()
		 * XXX implementation does uncached access.
		 */
		pmap_zero_page(pg);
#endif
		atomic_setbits_int(&pg->pg_flags, PG_ZERO);

		uvm_lock_fpageq();
		TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
		uvmexp.free++;
		uvmexp.zeropages++;
		uvm_unlock_fpageq();
	} while (curcpu_is_idle());
#endif /* 0 */
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX > 1
/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
int
vm_physseg_find(paddr_t pframe, int *offp)
{
	struct vm_physseg *seg;

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	/* binary search for it */
	int	start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */
		seg = vm_physmem + try;

		/* start past our try? */
		if (pframe >= seg->start) {
			/* was try correct? */
			if (pframe < seg->end) {
				if (offp)
					*offp = pframe - seg->start;
				return(try);		/* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);

#else
	/* linear search for it */
	int	lcv;

	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
		if (pframe >= seg->start && pframe < seg->end) {
			if (offp)
				*offp = pframe - seg->start;
			return(lcv);		/* got it */
		}
	}
	return(-1);

#endif
}
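
/*
 * Worked example of the "adjust" identity used in vm_physseg_find() above
 * (numbers chosen only to illustrate): for len = 7, round(7/2) - 1 =
 * 4 - 1 = 3 and trunc((7 - 1) / 2) = 3; for len = 6, round(6/2) - 1 =
 * 3 - 1 = 2 and trunc((6 - 1) / 2) = 2.  Decrementing len once on each
 * "too small" step therefore leaves exactly the upper half of the
 * remaining search range.
 */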

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);

	return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
}
#endif /* VM_PHYSSEG_MAX > 1 */

/*
 * uvm_pagelookup: look up a page
 *
 * => caller should lock object to keep someone from pulling the page
 *	out from under it
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
	/* XXX if stack is too much, handroll */
	struct vm_page	pg;

	pg.offset = off;
	return (RB_FIND(uvm_objtree, &obj->memt, &pg));
}

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */
void
uvm_pagewire(struct vm_page *pg)
{
	if (pg->wire_count == 0) {
		if (pg->pg_flags & PQ_ACTIVE) {
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active--;
		}
		if (pg->pg_flags & PQ_INACTIVE) {
			if (pg->pg_flags & PQ_SWAPBACKED)
				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
			else
				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
			uvmexp.inactive--;
		}
		uvmexp.wired++;
	}
	pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */
void
uvm_pageunwire(struct vm_page *pg)
{
	pg->wire_count--;
	if (pg->wire_count == 0) {
		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
		uvmexp.active++;
		atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.wired--;
	}
}

/*
 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 */
void
uvm_pagedeactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if ((pg->pg_flags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
		atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive++;
		pmap_clear_reference(pg);
		/*
		 * update the "clean" bit.  this isn't 100%
		 * accurate, and doesn't have to be.  we'll
		 * re-sync it after we zap all mappings when
		 * scanning the inactive list.
		 */
		if ((pg->pg_flags & PG_CLEAN) != 0 &&
		    pmap_is_modified(pg))
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	}
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */
void
uvm_pageactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}
	if (pg->wire_count == 0) {

		/*
		 * if page is already active, remove it from list so we
		 * can put it at tail.  if it wasn't active, then mark
		 * it active and bump active count
		 */
		if (pg->pg_flags & PQ_ACTIVE)
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		else {
			atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active++;
		}

		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
	}
}

/*
 * uvm_pagezero: zero fill a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagezero(struct vm_page *pg)
{
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	pmap_zero_page(pg);
}

/*
 * uvm_pagecopy: copy a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{
	atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
	pmap_copy_page(src, dst);
}

/*
 * uvm_pagecount: count the number of physical pages in the address range.
 */
psize_t
uvm_pagecount(struct uvm_constraint_range* constraint)
{
	int lcv;
	psize_t sz;
	paddr_t low, high;
	paddr_t ps_low, ps_high;

	/* Algorithm uses page numbers. */
	low = atop(constraint->ucr_low);
	high = atop(constraint->ucr_high);

	sz = 0;
	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		ps_low = MAX(low, vm_physmem[lcv].avail_start);
		ps_high = MIN(high, vm_physmem[lcv].avail_end);
		if (ps_low < ps_high)
			sz += ps_high - ps_low;
	}
	return sz;
}