/*	$OpenBSD: uvm_page.c,v 1.129 2014/01/23 22:06:30 miod Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * for object trees
 */
RB_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);

int
uvm_pagecmp(struct vm_page *a, struct vm_page *b)
{
        return (a->offset < b->offset ? -1 : a->offset > b->offset);
}

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];   /* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;                            /* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t virtual_space_start;
static vaddr_t virtual_space_end;

/*
 * local prototypes
 */

static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object
 *
 * => caller must lock page queues XXX questionable
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(struct vm_page *pg)
{
        struct vm_page *dupe;

        KASSERT((pg->pg_flags & PG_TABLED) == 0);
        dupe = RB_INSERT(uvm_objtree, &pg->uobject->memt, pg);
        /* not allowed to insert over another page */
        KASSERT(dupe == NULL);
        atomic_setbits_int(&pg->pg_flags, PG_TABLED);
        pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object
 *
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(struct vm_page *pg)
{
        KASSERT(pg->pg_flags & PG_TABLED);
        RB_REMOVE(uvm_objtree, &pg->uobject->memt, pg);

        atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
        pg->uobject->uo_npages--;
        pg->uobject = NULL;
        pg->pg_version++;
}
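
/*
 * Design note (added commentary): the per-object red-black tree above is
 * keyed only on pg->offset; uvm_pagecmp() maps smaller/equal/larger
 * offsets to -1/0/1 (the expression "a < b ? -1 : a > b" yields 0 on
 * equality).  Worked example with assumed offsets: a page at offset
 * 0x1000 sorts before one at 0x2000, and inserting a second page at
 * offset 0x1000 into the same object makes RB_INSERT return the
 * existing page, which trips the KASSERT(dupe == NULL) in
 * uvm_pageinsert().
 */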

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
        vsize_t freepages, pagecount, n;
        vm_page_t pagearray, curpg;
        int lcv, i;
        paddr_t paddr, pgno;
        struct vm_physseg *seg;

        /*
         * init the page queues and page queue locks
         */

        TAILQ_INIT(&uvm.page_active);
        TAILQ_INIT(&uvm.page_inactive_swp);
        TAILQ_INIT(&uvm.page_inactive_obj);
        mtx_init(&uvm.fpageqlock, IPL_VM);
        uvm_pmr_init();

        /*
         * allocate vm_page structures.
         */

        /*
         * sanity check:
         * before calling this function the MD code is expected to register
         * some free RAM with the uvm_page_physload() function.   our job
         * now is to allocate vm_page structures for this memory.
         */

        if (vm_nphysseg == 0)
                panic("uvm_page_bootstrap: no memory pre-allocated");

        /*
         * first calculate the number of free pages...
         *
         * note that we use start/end rather than avail_start/avail_end.
         * this allows us to allocate extra vm_page structures in case we
         * want to return some memory to the pool after booting.
         */

        freepages = 0;
        for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
                freepages += (seg->end - seg->start);

        /*
         * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
         * use.   for each page of memory we use we need a vm_page structure.
         * thus, the total number of pages we can use is the total size of
         * the memory divided by the PAGE_SIZE plus the size of the vm_page
         * structure.   we add one to freepages as a fudge factor to avoid
         * truncation errors (since we can only allocate in terms of whole
         * pages).
         */

        pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
            (PAGE_SIZE + sizeof(struct vm_page));
        pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
            sizeof(struct vm_page));
        memset(pagearray, 0, pagecount * sizeof(struct vm_page));

        /*
         * init the vm_page structures and put them in the correct place.
         */

        for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
                n = seg->end - seg->start;
                if (n > pagecount) {
                        panic("uvm_page_init: lost %ld page(s) in init",
                            (long)(n - pagecount));
                        /* XXXCDC: shouldn't happen? */
                        /* n = pagecount; */
                }

                /* set up page array pointers */
                seg->pgs = pagearray;
                pagearray += n;
                pagecount -= n;
                seg->lastpg = seg->pgs + (n - 1);

                /* init and free vm_pages (we've already zeroed them) */
                pgno = seg->start;
                paddr = ptoa(pgno);
                for (i = 0, curpg = seg->pgs; i < n;
                    i++, curpg++, pgno++, paddr += PAGE_SIZE) {
                        curpg->phys_addr = paddr;
                        VM_MDPAGE_INIT(curpg);
                        if (pgno >= seg->avail_start &&
                            pgno <= seg->avail_end) {
                                uvmexp.npages++;
                        }
                }

                /*
                 * Add pages to free pool.
                 */
                uvm_pmr_freepages(&seg->pgs[seg->avail_start - seg->start],
                    seg->avail_end - seg->avail_start);
        }

        /*
         * pass up the values of virtual_space_start and
         * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
         * layers of the VM.
         */

        *kvm_startp = round_page(virtual_space_start);
        *kvm_endp = trunc_page(virtual_space_end);

        /*
         * init locks for kernel threads
         */
        mtx_init(&uvm.aiodoned_lock, IPL_BIO);

        /*
         * init reserve thresholds
         * XXXCDC - values may need adjusting
         */
        uvmexp.reserve_pagedaemon = 4;
        uvmexp.reserve_kernel = 6;
        uvmexp.anonminpct = 10;
        uvmexp.vnodeminpct = 10;
        uvmexp.vtextminpct = 5;
        uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
        uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
        uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

        /*
         * determine if we should zero pages in the idle loop.
         */

        uvm.page_idle_zero = vm_page_zero_enable;

        /*
         * done!
         */

        uvm.page_init_done = TRUE;
}
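
/*
 * Worked example of the sizing computation above (added commentary; the
 * concrete numbers are assumptions, not constants from this file).
 * With PAGE_SIZE = 4096, PAGE_SHIFT = 12 and sizeof(struct vm_page) = 128:
 *
 *      pagecount = ((freepages + 1) << 12) / (4096 + 128)
 *
 * so for freepages = 262144 (1 GB of RAM in 4 KB pages):
 *
 *      pagecount = (262145 * 4096) / 4224 = 254201
 *
 * i.e. roughly 3% of the pages end up consumed by the vm_page array
 * itself, which is why pagecount is smaller than freepages.
 */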

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize(void)
{
        if (uvmexp.pagesize == 0)
                uvmexp.pagesize = DEFAULT_PAGE_SIZE;
        uvmexp.pagemask = uvmexp.pagesize - 1;
        if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
                panic("uvm_setpagesize: page size not a power of two");
        for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
                if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
                        break;
}
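
/*
 * Example (added commentary): with uvmexp.pagesize = 4096 the code above
 * yields uvmexp.pagemask = 0x0fff and uvmexp.pageshift = 12.  The
 * power-of-two check works because for pagesize == 1 << n the mask
 * (1 << n) - 1 shares no bits with pagesize (4096 & 4095 == 0), while a
 * non-power-of-two such as 6144 (0x1800) overlaps its mask 0x17ff in
 * bit 12 and panics.
 */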

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
#if defined(PMAP_STEAL_MEMORY)
        vaddr_t addr;

        /*
         * defer bootstrap allocation to MD code (it may want to allocate
         * from a direct-mapped segment).  pmap_steal_memory should round
         * off virtual_space_start/virtual_space_end.
         */

        addr = pmap_steal_memory(size, &virtual_space_start,
            &virtual_space_end);

        return(addr);

#else /* !PMAP_STEAL_MEMORY */

        static boolean_t initialized = FALSE;
        vaddr_t addr, vaddr;
        paddr_t paddr;

        /* round to page size */
        size = round_page(size);

        /*
         * on first call to this function, initialize ourselves.
         */
        if (initialized == FALSE) {
                pmap_virtual_space(&virtual_space_start, &virtual_space_end);

                /* round it the way we like it */
                virtual_space_start = round_page(virtual_space_start);
                virtual_space_end = trunc_page(virtual_space_end);

                initialized = TRUE;
        }

        /*
         * allocate virtual memory for this request
         */
        if (virtual_space_start == virtual_space_end ||
            (virtual_space_end - virtual_space_start) < size)
                panic("uvm_pageboot_alloc: out of virtual space");

        addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
        /*
         * If the kernel pmap can't map the requested space,
         * then allocate more resources for it.
         */
        if (uvm_maxkaddr < (addr + size)) {
                uvm_maxkaddr = pmap_growkernel(addr + size);
                if (uvm_maxkaddr < (addr + size))
                        panic("uvm_pageboot_alloc: pmap_growkernel() failed");
        }
#endif

        virtual_space_start += size;

        /*
         * allocate and mapin physical pages to back new virtual pages
         */

        for (vaddr = round_page(addr) ; vaddr < addr + size ;
            vaddr += PAGE_SIZE) {
                if (!uvm_page_physget(&paddr))
                        panic("uvm_pageboot_alloc: out of memory");

                /*
                 * Note this memory is no longer managed, so using
                 * pmap_kenter is safe.
                 */
                pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
        }
        pmap_update(pmap_kernel());
        return(addr);
#endif  /* PMAP_STEAL_MEMORY */
}
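
/*
 * Illustrative sketch (added commentary, not compiled): how early
 * bootstrap code typically uses uvm_pageboot_alloc() before the page
 * allocator is up, e.g. uvm_page_init() above carving out the vm_page
 * array.  "some_table" and "struct some_entry" are hypothetical names
 * for the example only.
 */
#if 0
        /* steal wired, mapped memory for 1024 table entries */
        some_table = (struct some_entry *)
            uvm_pageboot_alloc(1024 * sizeof(struct some_entry));
        /* this memory is permanent: there is no way to give it back */
#endif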

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

boolean_t
uvm_page_physget(paddr_t *paddrp)
{
        int lcv;
        struct vm_physseg *seg;

        /* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
        (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
        for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
            lcv--, seg--)
#else
        for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
#endif
        {
                if (uvm.page_init_done == TRUE)
                        panic("uvm_page_physget: called _after_ bootstrap");

                /* try from front */
                if (seg->avail_start == seg->start &&
                    seg->avail_start < seg->avail_end) {
                        *paddrp = ptoa(seg->avail_start);
                        seg->avail_start++;
                        seg->start++;
                        /* nothing left?   nuke it */
                        if (seg->avail_start == seg->end) {
                                if (vm_nphysseg == 1)
                                        panic("uvm_page_physget: out of memory!");
                                vm_nphysseg--;
                                for (; lcv < vm_nphysseg; lcv++, seg++)
                                        /* structure copy */
                                        seg[0] = seg[1];
                        }
                        return (TRUE);
                }

                /* try from rear */
                if (seg->avail_end == seg->end &&
                    seg->avail_start < seg->avail_end) {
                        *paddrp = ptoa(seg->avail_end - 1);
                        seg->avail_end--;
                        seg->end--;
                        /* nothing left?   nuke it */
                        if (seg->avail_end == seg->start) {
                                if (vm_nphysseg == 1)
                                        panic("uvm_page_physget: out of memory!");
                                vm_nphysseg--;
                                for (; lcv < vm_nphysseg ; lcv++, seg++)
                                        /* structure copy */
                                        seg[0] = seg[1];
                        }
                        return (TRUE);
                }
        }

        /* pass 2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
        (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
        for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
            lcv--, seg--)
#else
        for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
#endif
        {
                /* any room in this bank? */
                if (seg->avail_start >= seg->avail_end)
                        continue;  /* nope */

                *paddrp = ptoa(seg->avail_start);
                seg->avail_start++;
                /* truncate! */
                seg->start = seg->avail_start;

                /* nothing left?   nuke it */
                if (seg->avail_start == seg->end) {
                        if (vm_nphysseg == 1)
                                panic("uvm_page_physget: out of memory!");
                        vm_nphysseg--;
                        for (; lcv < vm_nphysseg ; lcv++, seg++)
                                /* structure copy */
                                seg[0] = seg[1];
                }
                return (TRUE);
        }

        return (FALSE);        /* whoops! */
}

#endif /* PMAP_STEAL_MEMORY */
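
/*
 * Illustrative trace of pass 1 above (added commentary; the PFNs are
 * assumptions for the example): given a segment with start ==
 * avail_start == 0x100 and end == avail_end == 0x200, the "try from
 * front" case hands out *paddrp = ptoa(0x100) and advances both start
 * and avail_start to 0x101, so the stolen frame no longer has a
 * vm_page structure reserved for it.  Once avail_start reaches end the
 * empty segment is compacted away from vm_physmem[].
 */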

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int flags)
{
        int preload, lcv;
        psize_t npages;
        struct vm_page *pgs;
        struct vm_physseg *ps, *seg;

#ifdef DIAGNOSTIC
        if (uvmexp.pagesize == 0)
                panic("uvm_page_physload: page size not set!");

        if (start >= end)
                panic("uvm_page_physload: start >= end");
#endif

        /*
         * do we have room?
         */
        if (vm_nphysseg == VM_PHYSSEG_MAX) {
                printf("uvm_page_physload: unable to load physical memory "
                    "segment\n");
                printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
                    VM_PHYSSEG_MAX, (long long)start, (long long)end);
                printf("\tincrease VM_PHYSSEG_MAX\n");
                return;
        }

        /*
         * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
         * called yet, so malloc is not available).
         */
        for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++) {
                if (seg->pgs)
                        break;
        }
        preload = (lcv == vm_nphysseg);

        /*
         * if VM is already running, attempt to malloc() vm_page structures
         */
        if (!preload) {
                /*
                 * XXXCDC: need some sort of lockout for this case
                 * right now it is only used by devices so it should be alright.
                 */
                paddr_t paddr;

                npages = end - start;  /* # of pages */

                pgs = (struct vm_page *)uvm_km_zalloc(kernel_map,
                    npages * sizeof(*pgs));
                if (pgs == NULL) {
                        printf("uvm_page_physload: can not malloc vm_page "
                            "structs for segment\n");
                        printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
                        return;
                }
                /* init phys_addr and free pages, XXX uvmexp.npages */
                for (lcv = 0, paddr = ptoa(start); lcv < npages;
                    lcv++, paddr += PAGE_SIZE) {
                        pgs[lcv].phys_addr = paddr;
                        VM_MDPAGE_INIT(&pgs[lcv]);
                        if (atop(paddr) >= avail_start &&
                            atop(paddr) <= avail_end) {
                                if (flags & PHYSLOAD_DEVICE) {
                                        atomic_setbits_int(&pgs[lcv].pg_flags,
                                            PG_DEV);
                                        pgs[lcv].wire_count = 1;
                                } else {
#if defined(VM_PHYSSEG_NOADD)
                                        panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#endif
                                }
                        }
                }

                /*
                 * Add pages to free pool.
                 */
                if ((flags & PHYSLOAD_DEVICE) == 0) {
                        uvm_pmr_freepages(&pgs[avail_start - start],
                            avail_end - avail_start);
                }

                /* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
        } else {
                /* gcc complains if these don't get init'd */
                pgs = NULL;
                npages = 0;
        }

        /*
         * now insert us in the proper place in vm_physmem[]
         */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
        /* random: put it at the end (easy!) */
        ps = &vm_physmem[vm_nphysseg];
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
        {
                int x;
                /* sort by address for binary search */
                for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
                        if (start < seg->start)
                                break;
                ps = seg;
                /* move back other entries, if necessary ... */
                for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
                    x--, seg--)
                        /* structure copy */
                        seg[1] = seg[0];
        }
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
        {
                int x;
                /* sort by largest segment first */
                for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
                        if ((end - start) >
                            (seg->end - seg->start))
                                break;
                ps = &vm_physmem[lcv];
                /* move back other entries, if necessary ... */
                for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
                    x--, seg--)
                        /* structure copy */
                        seg[1] = seg[0];
        }
#else
        panic("uvm_page_physload: unknown physseg strategy selected!");
#endif

        ps->start = start;
        ps->end = end;
        ps->avail_start = avail_start;
        ps->avail_end = avail_end;
        if (preload) {
                ps->pgs = NULL;
        } else {
                ps->pgs = pgs;
                ps->lastpg = pgs + npages - 1;
        }
        vm_nphysseg++;

        /*
         * done!
         */

        return;
}
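
/*
 * Illustrative sketch (added commentary, not compiled): a typical
 * machdep bootstrap call registering RAM from 16MB to 256MB with the
 * whole range available.  The addresses and the 0 flags value are
 * assumptions for the example; note all four address arguments are
 * page frame numbers, per the comment above.
 */
#if 0
        uvm_page_physload(atop(0x01000000), atop(0x10000000),
            atop(0x01000000), atop(0x10000000), 0);
#endif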

#ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump(void); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump(void)
{
        int lcv;
        struct vm_physseg *seg;

        printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
            vm_nphysseg, VM_PHYSSEG_MAX);
        for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
                printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
                    (long long)seg->start,
                    (long long)seg->end,
                    (long long)seg->avail_start,
                    (long long)seg->avail_end);
        printf("STRATEGY = ");
        switch (VM_PHYSSEG_STRAT) {
        case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
        case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
        case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
        default: printf("<<UNKNOWN>>!!!!\n");
        }
}
#endif

void
uvm_shutdown(void)
{
#ifdef UVM_SWAP_ENCRYPT
        uvm_swap_finicrypt_all();
#endif
}

/*
 * Perform insert of a given page in the specified anon or obj.
 * This is basically uvm_pagealloc, but with the page already given.
 */
void
uvm_pagealloc_pg(struct vm_page *pg, struct uvm_object *obj, voff_t off,
    struct vm_anon *anon)
{
        int flags;

        flags = PG_BUSY | PG_FAKE;
        pg->offset = off;
        pg->uobject = obj;
        pg->uanon = anon;

        if (anon) {
                anon->an_page = pg;
                flags |= PQ_ANON;
        } else if (obj)
                uvm_pageinsert(pg);
        atomic_setbits_int(&pg->pg_flags, flags);
#if defined(UVM_PAGE_TRKOWN)
        pg->owner_tag = NULL;
#endif
        UVM_PAGE_OWN(pg, "new alloc");
}

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed at the tail of rlist.  rlist is
 *    assumed to be properly initialized by caller.
 * => returns 0 on success or errno on failure
 * => doesn't take into account clean non-busy pages on inactive list
 *    that could be used(?)
 * => params:
 *      size            the size of the allocation, rounded to page size.
 *      low             the low address of the allowed allocation range.
 *      high            the high address of the allowed allocation range.
 *      alignment       memory must be aligned to this power-of-two boundary.
 *      boundary        no segment in the allocation may cross this
 *                      power-of-two boundary (relative to zero).
 * => flags:
 *      UVM_PLA_NOWAIT  fail if allocation fails
 *      UVM_PLA_WAITOK  wait for memory to become avail
 *      UVM_PLA_ZERO    return zeroed memory
 */
int
uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist, int nsegs, int flags)
{
        KASSERT((alignment & (alignment - 1)) == 0);
        KASSERT((boundary & (boundary - 1)) == 0);
        KASSERT(!(flags & UVM_PLA_WAITOK) ^ !(flags & UVM_PLA_NOWAIT));

        if (size == 0)
                return (EINVAL);
        size = atop(round_page(size));

        /*
         * check to see if we need to generate some free pages waking
         * the pagedaemon.
         */
        if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
            ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
            (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
                wakeup(&uvm.pagedaemon);

        /*
         * XXX uvm_pglistalloc is currently only used for kernel
         * objects. Unlike the checks in uvm_pagealloc, below, here
         * we are always allowed to use the kernel reserve. However, we
         * have to enforce the pagedaemon reserve here or allocations
         * via this path could consume everything and we can't
         * recover in the page daemon.
         */
 again:
        if ((uvmexp.free <= uvmexp.reserve_pagedaemon + size &&
            !((curproc == uvm.pagedaemon_proc) ||
                (curproc == syncerproc)))) {
                if (flags & UVM_PLA_WAITOK) {
                        uvm_wait("uvm_pglistalloc");
                        goto again;
                }
                return (ENOMEM);
        }

        if ((high & PAGE_MASK) != PAGE_MASK) {
                printf("uvm_pglistalloc: Upper boundary 0x%lx "
                    "not on pagemask.\n", (unsigned long)high);
        }

        /*
         * Our allocations are always page granularity, so our alignment
         * must be, too.
         */
        if (alignment < PAGE_SIZE)
                alignment = PAGE_SIZE;

        low = atop(roundup(low, alignment));
        /*
         * high + 1 may result in overflow, in which case high becomes 0x0,
         * which is the 'don't care' value.
         * The only requirement in that case is that low is also 0x0, or the
         * low<high assert will fail.
         */
        high = atop(high + 1);
        alignment = atop(alignment);
        if (boundary < PAGE_SIZE && boundary != 0)
                boundary = PAGE_SIZE;
        boundary = atop(boundary);

        return uvm_pmr_getpages(size, low, high, alignment, boundary, nsegs,
            flags, rlist);
}
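
/*
 * Illustrative sketch (added commentary, not compiled): allocating 64KB
 * of physically contiguous, DMA-reachable memory.  nsegs = 1 forces a
 * single contiguous segment; the error handling shown is a simplified
 * assumption for the example.
 */
#if 0
        struct pglist pl;
        int error;

        TAILQ_INIT(&pl);
        error = uvm_pglistalloc(64 * 1024, dma_constraint.ucr_low,
            dma_constraint.ucr_high, PAGE_SIZE, 0, &pl, 1, UVM_PLA_WAITOK);
        if (error == 0)
                uvm_pglistfree(&pl);    /* release when done with the pages */
#endif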

/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */
void
uvm_pglistfree(struct pglist *list)
{
        uvm_pmr_freepageq(list);
}

/*
 * interface used by the buffer cache to allocate a buffer at a time.
 * The pages are allocated wired in DMA accessible memory
 */
void
uvm_pagealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
    int flags)
{
        struct pglist plist;
        struct vm_page *pg;
        int i;

        TAILQ_INIT(&plist);
        (void) uvm_pglistalloc(size, dma_constraint.ucr_low,
            dma_constraint.ucr_high, 0, 0, &plist, atop(round_page(size)),
            UVM_PLA_WAITOK);
        i = 0;
        while ((pg = TAILQ_FIRST(&plist)) != NULL) {
                pg->wire_count = 1;
                atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
                KASSERT((pg->pg_flags & PG_DEV) == 0);
                TAILQ_REMOVE(&plist, pg, pageq);
                uvm_pagealloc_pg(pg, obj, off + ptoa(i++), NULL);
        }
}

/*
 * interface used by the buffer cache to reallocate a buffer at a time.
 * The pages are reallocated wired outside the DMA accessible region.
 */
void
uvm_pagerealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
    int flags, struct uvm_constraint_range *where)
{
        struct pglist plist;
        struct vm_page *pg, *tpg;
        int i;
        voff_t offset;

        TAILQ_INIT(&plist);
        if (size == 0)
                panic("size 0 uvm_pagerealloc");
        (void) uvm_pglistalloc(size, where->ucr_low, where->ucr_high, 0,
            0, &plist, atop(round_page(size)), UVM_PLA_WAITOK);
        i = 0;
        while ((pg = TAILQ_FIRST(&plist)) != NULL) {
                offset = off + ptoa(i++);
                tpg = uvm_pagelookup(obj, offset);
                pg->wire_count = 1;
                atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
                KASSERT((pg->pg_flags & PG_DEV) == 0);
                TAILQ_REMOVE(&plist, pg, pageq);
                uvm_pagecopy(tpg, pg);
                uvm_pagefree(tpg);
                uvm_pagealloc_pg(pg, obj, offset, NULL);
        }
}

/*
 * uvm_pagealloc: allocate a vm_page from the free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 */

struct vm_page *
uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags)
{
        struct vm_page *pg;
        struct pglist pgl;
        int pmr_flags;
        boolean_t use_reserve;

        KASSERT(obj == NULL || anon == NULL);
        KASSERT(off == trunc_page(off));

        /*
         * check to see if we need to generate some free pages waking
         * the pagedaemon.
         */
        if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
            ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
            (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
                wakeup(&uvm.pagedaemon);

        /*
         * fail if any of these conditions is true:
         * [1]  there really are no free pages, or
         * [2]  only kernel "reserved" pages remain and
         *        the page isn't being allocated to a kernel object.
         * [3]  only pagedaemon "reserved" pages remain and
         *        the requestor isn't the pagedaemon.
         */

        use_reserve = (flags & UVM_PGA_USERESERVE) ||
            (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
        if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
            (uvmexp.free <= uvmexp.reserve_pagedaemon &&
            !((curproc == uvm.pagedaemon_proc) ||
            (curproc == syncerproc))))
                goto fail;

        pmr_flags = UVM_PLA_NOWAIT;
        if (flags & UVM_PGA_ZERO)
                pmr_flags |= UVM_PLA_ZERO;
        TAILQ_INIT(&pgl);
        if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0)
                goto fail;

        pg = TAILQ_FIRST(&pgl);
        KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL);

        uvm_pagealloc_pg(pg, obj, off, anon);
        KASSERT((pg->pg_flags & PG_DEV) == 0);
        if (flags & UVM_PGA_ZERO)
                atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
        else
                atomic_setbits_int(&pg->pg_flags, PG_CLEAN);

        return(pg);

 fail:
        return (NULL);
}
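
/*
 * Illustrative sketch (added commentary, not compiled): the common
 * allocation pattern around uvm_pagealloc().  The retry policy shown is
 * a simplified assumption; real callers also re-lock and re-lookup
 * after sleeping.
 */
#if 0
        struct vm_page *pg;

        pg = uvm_pagealloc(uobj, offset, NULL, UVM_PGA_ZERO);
        if (pg == NULL) {
                uvm_wait("pgalloc");    /* sleep until pagedaemon frees memory */
                /* ... then retry the allocation ... */
        }
#endif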

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 */

void
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
{
        /*
         * remove it from the old object
         */

        if (pg->uobject) {
                uvm_pageremove(pg);
        }

        /*
         * put it in the new object
         */

        if (newobj) {
                pg->uobject = newobj;
                pg->offset = newoff;
                pg->pg_version++;
                uvm_pageinsert(pg);
        }
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from object)
 * => put page on free list
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(struct vm_page *pg)
{
        int saved_loan_count = pg->loan_count;
        u_int flags_to_clear = 0;

#ifdef DEBUG
        if (pg->uobject == (void *)0xdeadbeef &&
            pg->uanon == (void *)0xdeadbeef) {
                panic("uvm_pagefree: freeing free page %p", pg);
        }
#endif

        KASSERT((pg->pg_flags & PG_DEV) == 0);

        /*
         * if the page was an object page (and thus "TABLED"), remove it
         * from the object.
         */

        if (pg->pg_flags & PG_TABLED) {
                /*
                 * if the object page is on loan we are going to drop ownership.
                 * it is possible that an anon will take over as owner for this
                 * page later on.  the anon will want a !PG_CLEAN page so that
                 * it knows it needs to allocate swap if it wants to page the
                 * page out.
                 */

                /* in case an anon takes over */
                if (saved_loan_count)
                        atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
                uvm_pageremove(pg);

                /*
                 * if our page was on loan, then we just lost control over it
                 * (in fact, if it was loaned to an anon, the anon may have
                 * already taken over ownership of the page by now and thus
                 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
                 * return (when the last loan is dropped, then the page can be
                 * freed by whatever was holding the last loan).
                 */

                if (saved_loan_count)
                        return;
        } else if (saved_loan_count && pg->uanon) {
                /*
                 * if our page is owned by an anon and is loaned out to the
                 * kernel then we just want to drop ownership and return.
                 * the kernel must free the page when all its loans clear ...
                 * note that the kernel can't change the loan status of our
                 * page as long as we are holding PQ lock.
                 */
                atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
                pg->uanon->an_page = NULL;
                pg->uanon = NULL;
                return;
        }
        KASSERT(saved_loan_count == 0);

        /*
         * now remove the page from the queues
         */

        if (pg->pg_flags & PQ_ACTIVE) {
                TAILQ_REMOVE(&uvm.page_active, pg, pageq);
                flags_to_clear |= PQ_ACTIVE;
                uvmexp.active--;
        }
        if (pg->pg_flags & PQ_INACTIVE) {
                if (pg->pg_flags & PQ_SWAPBACKED)
                        TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
                else
                        TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
                flags_to_clear |= PQ_INACTIVE;
                uvmexp.inactive--;
        }

        /*
         * if the page was wired, unwire it now.
         */

        if (pg->wire_count) {
                pg->wire_count = 0;
                uvmexp.wired--;
        }
        if (pg->uanon) {
                pg->uanon->an_page = NULL;
                pg->uanon = NULL;
                flags_to_clear |= PQ_ANON;
        }

        /*
         * Clean page state bits.
         */
        flags_to_clear |= PQ_AOBJ;      /* XXX: find culprit */
        flags_to_clear |= PQ_ENCRYPT|PG_ZERO|PG_FAKE|PG_BUSY|PG_RELEASED|
            PG_CLEAN|PG_CLEANCHK;
        atomic_clearbits_int(&pg->pg_flags, flags_to_clear);

        /*
         * and put on free queue
         */

#ifdef DEBUG
        pg->uobject = (void *)0xdeadbeef;
        pg->offset = 0xdeadbeef;
        pg->uanon = (void *)0xdeadbeef;
#endif

        uvm_pmr_freepages(pg, 1);

        if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
                uvm.page_idle_zero = vm_page_zero_enable;
}

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are anon-owned, anons must have 0 refcount.
 */

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
        struct vm_page *pg;
        struct uvm_object *uobj;
        int i;

        for (i = 0; i < npgs; i++) {
                pg = pgs[i];

                if (pg == NULL || pg == PGO_DONTCARE) {
                        continue;
                }
                if (pg->pg_flags & PG_WANTED) {
                        wakeup(pg);
                }
                if (pg->pg_flags & PG_RELEASED) {
                        uobj = pg->uobject;
                        if (uobj != NULL) {
                                uvm_lock_pageq();
                                pmap_page_protect(pg, VM_PROT_NONE);
                                /* XXX won't happen right now */
                                if (pg->pg_flags & PQ_AOBJ)
                                        uao_dropswap(uobj,
                                            pg->offset >> PAGE_SHIFT);
                                uvm_pagefree(pg);
                                uvm_unlock_pageq();
                        } else {
                                atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
                                UVM_PAGE_OWN(pg, NULL);
                                uvm_anfree(pg->uanon);
                        }
                } else {
                        atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
                        UVM_PAGE_OWN(pg, NULL);
                }
        }
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *    and where they do it.   it can be used to track down problems
 *    such as a process setting "PG_BUSY" and never releasing it.
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(struct vm_page *pg, char *tag)
{
        /* gain ownership? */
        if (tag) {
                if (pg->owner_tag) {
                        printf("uvm_page_own: page %p already owned "
                            "by proc %d [%s]\n", pg,
                            pg->owner, pg->owner_tag);
                        panic("uvm_page_own");
                }
                pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
                pg->owner_tag = tag;
                return;
        }

        /* drop ownership */
        if (pg->owner_tag == NULL) {
                printf("uvm_page_own: dropping ownership of a non-owned "
                    "page (%p)\n", pg);
                panic("uvm_page_own");
        }
        pg->owner_tag = NULL;
        return;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => we do at least one iteration per call, if we are below the target.
 * => we loop until we either reach the target or whichqs indicates that
 *    there is a process ready to run.
 */
void
uvm_pageidlezero(void)
{
#if 0 /* disabled: need new code */
        struct vm_page *pg;
        struct pgfreelist *pgfl;
        int free_list;

        do {
                uvm_lock_fpageq();

                if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
                        uvm.page_idle_zero = FALSE;
                        uvm_unlock_fpageq();
                        return;
                }

                for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
                        pgfl = &uvm.page_free[free_list];
                        if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
                            PGFL_UNKNOWN])) != NULL)
                                break;
                }

                if (pg == NULL) {
                        /*
                         * No non-zero'd pages; don't bother trying again
                         * until we know we have non-zero'd pages free.
                         */
                        uvm.page_idle_zero = FALSE;
                        uvm_unlock_fpageq();
                        return;
                }

                TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
                uvmexp.free--;
                uvm_unlock_fpageq();

#ifdef PMAP_PAGEIDLEZERO
                if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
                        /*
                         * The machine-dependent code detected some
                         * reason for us to abort zeroing pages,
                         * probably because there is a process now
                         * ready to run.
                         */
                        uvm_lock_fpageq();
                        TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
                            pg, pageq);
                        uvmexp.free++;
                        uvmexp.zeroaborts++;
                        uvm_unlock_fpageq();
                        return;
                }
#else
                /*
                 * XXX This will toast the cache unless the pmap_zero_page()
                 * XXX implementation does uncached access.
                 */
                pmap_zero_page(pg);
#endif
                atomic_setbits_int(&pg->pg_flags, PG_ZERO);

                uvm_lock_fpageq();
                TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
                uvmexp.free++;
                uvmexp.zeropages++;
                uvm_unlock_fpageq();
        } while (curcpu_is_idle());
#endif /* 0 */
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX > 1
/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
int
vm_physseg_find(paddr_t pframe, int *offp)
{
        struct vm_physseg *seg;

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
        /* binary search for it */
        int start, len, try;

        /*
         * if try is too large (thus target is less than try) we reduce
         * the length to trunc(len/2) [i.e. everything smaller than "try"]
         *
         * if the try is too small (thus target is greater than try) then
         * we set the new start to be (try + 1).   this means we need to
         * reduce the length to (round(len/2) - 1).
         *
         * note "adjust" below which takes advantage of the fact that
         *  (round(len/2) - 1) == trunc((len - 1) / 2)
         * for any value of len we may have
         */

        for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
                try = start + (len / 2);        /* try in the middle */
                seg = vm_physmem + try;

                /* start past our try? */
                if (pframe >= seg->start) {
                        /* was try correct? */
                        if (pframe < seg->end) {
                                if (offp)
                                        *offp = pframe - seg->start;
                                return(try);            /* got it */
                        }
                        start = try + 1;        /* next time, start here */
                        len--;                  /* "adjust" */
                } else {
                        /*
                         * pframe before try, just reduce length of
                         * region, done in "for" loop
                         */
                }
        }
        return(-1);

#else
        /* linear search for it */
        int lcv;

        for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
                if (pframe >= seg->start && pframe < seg->end) {
                        if (offp)
                                *offp = pframe - seg->start;
                        return(lcv);            /* got it */
                }
        }
        return(-1);

#endif
}
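
/*
 * Worked example of the binary search above (added commentary; segment
 * layout is an assumption): with three address-sorted segments covering
 * PFNs [0x000,0x100), [0x100,0x300) and [0x800,0xa00), looking up
 * pframe 0x250 goes:
 *
 *      start=0 len=3: try=1, 0x250 >= 0x100 and 0x250 < 0x300
 *              -> return 1 with *offp = 0x150
 *
 * while pframe 0x500 (in the hole) goes:
 *
 *      start=0 len=3: try=1, 0x500 >= 0x100 but not < 0x300
 *              -> start=2, len adjusts 3->2 then halves to 1
 *      start=2 len=1: try=2, 0x500 < 0x800 -> len halves to 0
 *              -> loop ends, return -1
 */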

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
        paddr_t pf = atop(pa);
        int off;
        int psi;

        psi = vm_physseg_find(pf, &off);

        return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
}
#endif /* VM_PHYSSEG_MAX > 1 */

/*
 * uvm_pagelookup: look up a page
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
        /* XXX if stack is too much, handroll */
        struct vm_page pg;

        pg.offset = off;
        return (RB_FIND(uvm_objtree, &obj->memt, &pg));
}
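
/*
 * Design note (added commentary): uvm_pagelookup() builds a dummy
 * vm_page on the stack whose only initialized field is "offset" -- the
 * sole key uvm_pagecmp() examines -- and uses it as the RB_FIND search
 * key.
 *
 * Illustrative sketch, not compiled: a typical lookup-and-busy sequence
 * (locking elided; an assumption for the example).
 */
#if 0
        struct vm_page *pg;

        pg = uvm_pagelookup(uobj, trunc_page(off));
        if (pg != NULL && (pg->pg_flags & PG_BUSY) == 0) {
                atomic_setbits_int(&pg->pg_flags, PG_BUSY);
                /* ... use the page, then clear PG_BUSY and wakeup(pg) ... */
        }
#endif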

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */
void
uvm_pagewire(struct vm_page *pg)
{
        if (pg->wire_count == 0) {
                if (pg->pg_flags & PQ_ACTIVE) {
                        TAILQ_REMOVE(&uvm.page_active, pg, pageq);
                        atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
                        uvmexp.active--;
                }
                if (pg->pg_flags & PQ_INACTIVE) {
                        if (pg->pg_flags & PQ_SWAPBACKED)
                                TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
                        else
                                TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
                        atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
                        uvmexp.inactive--;
                }
                uvmexp.wired++;
        }
        pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */
void
uvm_pageunwire(struct vm_page *pg)
{
        pg->wire_count--;
        if (pg->wire_count == 0) {
                TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
                uvmexp.active++;
                atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
                uvmexp.wired--;
        }
}

/*
 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 */
void
uvm_pagedeactivate(struct vm_page *pg)
{
        if (pg->pg_flags & PQ_ACTIVE) {
                TAILQ_REMOVE(&uvm.page_active, pg, pageq);
                atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
                uvmexp.active--;
        }
        if ((pg->pg_flags & PQ_INACTIVE) == 0) {
                KASSERT(pg->wire_count == 0);
                if (pg->pg_flags & PQ_SWAPBACKED)
                        TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
                else
                        TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
                atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
                uvmexp.inactive++;
                pmap_clear_reference(pg);
                /*
                 * update the "clean" bit.  this isn't 100%
                 * accurate, and doesn't have to be.  we'll
                 * re-sync it after we zap all mappings when
                 * scanning the inactive list.
                 */
                if ((pg->pg_flags & PG_CLEAN) != 0 &&
                    pmap_is_modified(pg))
                        atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
        }
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */
void
uvm_pageactivate(struct vm_page *pg)
{
        if (pg->pg_flags & PQ_INACTIVE) {
                if (pg->pg_flags & PQ_SWAPBACKED)
                        TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
                else
                        TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
                atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
                uvmexp.inactive--;
        }
        if (pg->wire_count == 0) {
                /*
                 * if page is already active, remove it from list so we
                 * can put it at tail.  if it wasn't active, then mark
                 * it active and bump active count
                 */
                if (pg->pg_flags & PQ_ACTIVE)
                        TAILQ_REMOVE(&uvm.page_active, pg, pageq);
                else {
                        atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
                        uvmexp.active++;
                }

                TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
        }
}

/*
 * uvm_pagezero: zero fill a page
 */
void
uvm_pagezero(struct vm_page *pg)
{
        atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
        pmap_zero_page(pg);
}

/*
 * uvm_pagecopy: copy a page
 */
void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{
        atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
        pmap_copy_page(src, dst);
}

/*
 * uvm_pagecount: count the number of physical pages in the address range.
 */
psize_t
uvm_pagecount(struct uvm_constraint_range *constraint)
{
        int lcv;
        psize_t sz;
        paddr_t low, high;
        paddr_t ps_low, ps_high;

        /* Algorithm uses page numbers. */
        low = atop(constraint->ucr_low);
        high = atop(constraint->ucr_high);

        sz = 0;
        for (lcv = 0; lcv < vm_nphysseg; lcv++) {
                ps_low = MAX(low, vm_physmem[lcv].avail_start);
                ps_high = MIN(high, vm_physmem[lcv].avail_end);
                if (ps_low < ps_high)
                        sz += ps_high - ps_low;
        }
        return sz;
}
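
/*
 * Worked example for uvm_pagecount() (added commentary; the numbers are
 * assumptions): with a single segment whose avail range covers PFNs
 * [0x100, 0x500) and a constraint of ucr_low = ptoa(0x200) and
 * ucr_high = ptoa(0x400), the clipped overlap is [0x200, 0x400), so
 * 0x200 pages are counted.  A segment entirely outside the constraint
 * contributes nothing because ps_low >= ps_high there.
 */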