uvm_page.c revision 1.127
/*	$OpenBSD: uvm_page.c,v 1.127 2013/06/21 21:42:17 kettenis Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c	8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
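 *
 * This file manages the vm_page structures and the physical memory
 * segments behind them: bootstrap allocation, uvm_page_physload(),
 * page allocation and free, and the active/inactive page queue operations.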
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * for object trees
 */
RB_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);

int
uvm_pagecmp(struct vm_page *a, struct vm_page *b)
{
	return (a->offset < b->offset ? -1 : a->offset > b->offset);
}

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * local prototypes
 */

static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object
 *
 * => caller must lock page queues XXX questionable
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(struct vm_page *pg)
{
	struct vm_page *dupe;

	KASSERT((pg->pg_flags & PG_TABLED) == 0);
	dupe = RB_INSERT(uvm_objtree, &pg->uobject->memt, pg);
	/* not allowed to insert over another page */
	KASSERT(dupe == NULL);
	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages++;
}

/*
 * uvm_page_remove: remove page from object
 *
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(struct vm_page *pg)
{

	KASSERT(pg->pg_flags & PG_TABLED);
	RB_REMOVE(uvm_objtree, &pg->uobject->memt, pg);

	atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->pg_version++;
}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray, curpg;
	int lcv, i;
	paddr_t paddr, pgno;
	struct vm_physseg *seg;

	/*
	 * init the page queues and page queue locks
	 */

	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	mtx_init(&uvm.fpageqlock, IPL_VM);
	uvm_pmr_init();

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
		freepages += (seg->end - seg->start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
		n = seg->end - seg->start;
		if (n > pagecount) {
			panic("uvm_page_init: lost %ld page(s) in init",
			    (long)(n - pagecount));
			    /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		seg->pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		seg->lastpg = seg->pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		pgno = seg->start;
		paddr = ptoa(pgno);
		for (i = 0, curpg = seg->pgs; i < n;
		    i++, curpg++, pgno++, paddr += PAGE_SIZE) {
			curpg->phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(curpg);
#endif
			if (pgno >= seg->avail_start &&
			    pgno <= seg->avail_end) {
				uvmexp.npages++;
			}
		}

		/*
		 * Add pages to free pool.
		 */
		uvm_pmr_freepages(&seg->pgs[seg->avail_start - seg->start],
		    seg->avail_end - seg->avail_start);
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init locks for kernel threads
	 */
	mtx_init(&uvm.aiodoned_lock, IPL_BIO);

	/*
	 * init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
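 *    (for example, a pagesize of 4096 gives pagemask 0xfff and pageshift 12)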
 */

void
uvm_setpagesize(void)
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
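 * => may only be used during bootstrap: the first pass panics if
 *    uvm.page_init_done is already set.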
 */

boolean_t
uvm_page_physget(paddr_t *paddrp)
{
	int lcv;
	struct vm_physseg *seg;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
	    lcv--, seg--)
#else
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
#endif
	{
		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		/* try from front */
		if (seg->avail_start == seg->start &&
		    seg->avail_start < seg->avail_end) {
			*paddrp = ptoa(seg->avail_start);
			seg->avail_start++;
			seg->start++;
			/* nothing left?   nuke it */
			if (seg->avail_start == seg->end) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (; lcv < vm_nphysseg; lcv++, seg++)
					/* structure copy */
					seg[0] = seg[1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (seg->avail_end == seg->end &&
		    seg->avail_start < seg->avail_end) {
			*paddrp = ptoa(seg->avail_end - 1);
			seg->avail_end--;
			seg->end--;
			/* nothing left?   nuke it */
			if (seg->avail_end == seg->start) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (; lcv < vm_nphysseg ; lcv++, seg++)
					/* structure copy */
					seg[0] = seg[1];
			}
			return (TRUE);
		}
	}

	/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
	    lcv--, seg--)
#else
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
#endif
	{

		/* any room in this bank? */
		if (seg->avail_start >= seg->avail_end)
			continue;  /* nope */

		*paddrp = ptoa(seg->avail_start);
		seg->avail_start++;
		/* truncate! */
		seg->start = seg->avail_start;

		/* nothing left?   nuke it */
		if (seg->avail_start == seg->end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (; lcv < vm_nphysseg ; lcv++, seg++)
				/* structure copy */
				seg[0] = seg[1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}

#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int flags)
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps, *seg;

#ifdef DIAGNOSTIC
	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (start >= end)
		panic("uvm_page_physload: start >= end");
#endif

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++) {
		if (seg->pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
		/*
		 * XXXCDC: need some sort of lockout for this case
		 * right now it is only used by devices so it should be alright.
		 */
		paddr_t paddr;

		npages = end - start;  /* # of pages */

		pgs = (struct vm_page *)uvm_km_zalloc(kernel_map,
		    npages * sizeof(*pgs));
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* init phys_addr and free pages, XXX uvmexp.npages */
		for (lcv = 0, paddr = ptoa(start); lcv < npages;
		    lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&pgs[lcv]);
#endif
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end) {
				if (flags & PHYSLOAD_DEVICE) {
					atomic_setbits_int(&pgs[lcv].pg_flags,
					    PG_DEV);
					pgs[lcv].wire_count = 1;
				} else {
#if defined(VM_PHYSSEG_NOADD)
					panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#endif
				}
			}
		}

		/*
		 * Add pages to free pool.
		 */
		if ((flags & PHYSLOAD_DEVICE) == 0) {
			uvm_pmr_freepages(&pgs[avail_start - start],
			    avail_end - avail_start);
		}

		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
			if (start < seg->start)
				break;
		ps = seg;
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
		    x--, seg--)
			/* structure copy */
			seg[1] = seg[0];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
			if ((end - start) >
			    (seg->end - seg->start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
		    x--, seg--)
			/* structure copy */
			seg[1] = seg[0];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	vm_nphysseg++;

	/*
	 * done!
	 */

	return;
}

#ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump(void); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump(void)
{
	int lcv;
	struct vm_physseg *seg;

	printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
		    (long long)seg->start,
		    (long long)seg->end,
		    (long long)seg->avail_start,
		    (long long)seg->avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
}
#endif

void
uvm_shutdown(void)
{
#ifdef UVM_SWAP_ENCRYPT
	uvm_swap_finicrypt_all();
#endif
}

/*
 * Perform insert of a given page in the specified anon of obj.
 * This is basically, uvm_pagealloc, but with the page already given.
 */
void
uvm_pagealloc_pg(struct vm_page *pg, struct uvm_object *obj, voff_t off,
    struct vm_anon *anon)
{
	int flags;

	flags = PG_BUSY | PG_FAKE;
	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;

	if (anon) {
		anon->an_page = pg;
		flags |= PQ_ANON;
	} else if (obj)
		uvm_pageinsert(pg);
	atomic_setbits_int(&pg->pg_flags, flags);
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");
}

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed at the tail of rlist.  rlist is
 *    assumed to be properly initialized by caller.
 * => returns 0 on success or errno on failure
 * => doesn't take into account clean non-busy pages on inactive list
 *    that could be used(?)
 * => params:
 *	size		the size of the allocation, rounded to page size.
 *	low		the low address of the allowed allocation range.
 *	high		the high address of the allowed allocation range.
 *	alignment	memory must be aligned to this power-of-two boundary.
 *	boundary	no segment in the allocation may cross this
 *			power-of-two boundary (relative to zero).
 * => flags:
 *	UVM_PLA_NOWAIT	fail if allocation fails
 *	UVM_PLA_WAITOK	wait for memory to become avail
 *	UVM_PLA_ZERO	return zeroed memory
 */
int
uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist, int nsegs, int flags)
{
	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);
	KASSERT(!(flags & UVM_PLA_WAITOK) ^ !(flags & UVM_PLA_NOWAIT));

	if (size == 0)
		return (EINVAL);
	size = atop(round_page(size));

	/*
	 * check to see if we need to generate some free pages waking
	 * the pagedaemon.
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * XXX uvm_pglistalloc is currently only used for kernel
	 * objects. Unlike the checks in uvm_pagealloc, below, here
	 * we are always allowed to use the kernel reserve.  However, we
	 * have to enforce the pagedaemon reserve here or allocations
	 * via this path could consume everything and we can't
	 * recover in the page daemon.
	 */
again:
	if ((uvmexp.free <= uvmexp.reserve_pagedaemon + size &&
	    !((curproc == uvm.pagedaemon_proc) ||
	    (curproc == syncerproc)))) {
		if (flags & UVM_PLA_WAITOK) {
			uvm_wait("uvm_pglistalloc");
			goto again;
		}
		return (ENOMEM);
	}

	if ((high & PAGE_MASK) != PAGE_MASK) {
		printf("uvm_pglistalloc: Upper boundary 0x%lx "
		    "not on pagemask.\n", (unsigned long)high);
	}

	/*
	 * Our allocations are always page granularity, so our alignment
	 * must be, too.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	low = atop(roundup(low, alignment));
	/*
	 * high + 1 may result in overflow, in which case high becomes 0x0,
	 * which is the 'don't care' value.
	 * The only requirement in that case is that low is also 0x0, or the
	 * low<high assert will fail.
	 */
	high = atop(high + 1);
	alignment = atop(alignment);
	if (boundary < PAGE_SIZE && boundary != 0)
		boundary = PAGE_SIZE;
	boundary = atop(boundary);

	return uvm_pmr_getpages(size, low, high, alignment, boundary, nsegs,
	    flags, rlist);
}

/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */
void
uvm_pglistfree(struct pglist *list)
{
	uvm_pmr_freepageq(list);
}

/*
 * interface used by the buffer cache to allocate a buffer at a time.
 * The pages are allocated wired in DMA accessible memory
 */
int
uvm_pagealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
    int flags)
{
	struct pglist plist;
	struct vm_page *pg;
	int i, r;


	TAILQ_INIT(&plist);
	r = uvm_pglistalloc(size, dma_constraint.ucr_low,
	    dma_constraint.ucr_high, 0, 0, &plist, atop(round_page(size)),
	    flags);
	if (r != 0)
		return(r);
	i = 0;
	while ((pg = TAILQ_FIRST(&plist)) != NULL) {
		pg->wire_count = 1;
		atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
		KASSERT((pg->pg_flags & PG_DEV) == 0);
		TAILQ_REMOVE(&plist, pg, pageq);
		uvm_pagealloc_pg(pg, obj, off + ptoa(i++), NULL);
	}
	return(0);
}

/*
 * interface used by the buffer cache to reallocate a buffer at a time.
 * The pages are reallocated wired outside the DMA accessible region.
 *
 */
int
uvm_pagerealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
    int flags, struct uvm_constraint_range *where)
{
	struct pglist plist;
	struct vm_page *pg, *tpg;
	int i,r;
	voff_t offset;

	TAILQ_INIT(&plist);
	if (size == 0)
		panic("size 0 uvm_pagerealloc");
	r = uvm_pglistalloc(size, where->ucr_low, where->ucr_high, 0,
	    0, &plist, atop(round_page(size)), flags);
	if (r != 0)
		return(r);
	i = 0;
	while((pg = TAILQ_FIRST(&plist)) != NULL) {
		offset = off + ptoa(i++);
		tpg = uvm_pagelookup(obj, offset);
		KASSERT(tpg != NULL);
		pg->wire_count = 1;
		atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
		KASSERT((pg->pg_flags & PG_DEV) == 0);
		TAILQ_REMOVE(&plist, pg, pageq);
		uvm_pagecopy(tpg, pg);
		KASSERT(tpg->wire_count == 1);
		tpg->wire_count = 0;
		uvm_pagefree(tpg);
		uvm_pagealloc_pg(pg, obj, offset, NULL);
	}
	return(0);
}

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
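 *
 * => flags: UVM_PGA_USERESERVE lets the allocation dip into the kernel
 *    reserve; UVM_PGA_ZERO asks for a zero-filled page.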
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 */

struct vm_page *
uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags)
{
	struct vm_page *pg;
	struct pglist pgl;
	int pmr_flags;
	boolean_t use_reserve;

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));

	/*
	 * check to see if we need to generate some free pages waking
	 * the pagedaemon.
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	    !((curproc == uvm.pagedaemon_proc) ||
	    (curproc == syncerproc))))
		goto fail;

	pmr_flags = UVM_PLA_NOWAIT;
	if (flags & UVM_PGA_ZERO)
		pmr_flags |= UVM_PLA_ZERO;
	TAILQ_INIT(&pgl);
	if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0)
		goto fail;

	pg = TAILQ_FIRST(&pgl);
	KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL);

	uvm_pagealloc_pg(pg, obj, off, anon);
	KASSERT((pg->pg_flags & PG_DEV) == 0);
	if (flags & UVM_PGA_ZERO)
		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	else
		atomic_setbits_int(&pg->pg_flags, PG_CLEAN);

	return(pg);

fail:
	return (NULL);
}

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 */

void
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
{

	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->pg_version++;
		uvm_pageinsert(pg);
	}
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from object)
 * => put page on free list
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(struct vm_page *pg)
{
	int saved_loan_count = pg->loan_count;
	u_int flags_to_clear = 0;

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p", pg);
	}
#endif

	KASSERT((pg->pg_flags & PG_DEV) == 0);

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->pg_flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.   the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		/* in case an anon takes over */
		if (saved_loan_count)
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */

		if (saved_loan_count)
			return;
	} else if (saved_loan_count && pg->uanon) {
		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		return;
	}
	KASSERT(saved_loan_count == 0);

	/*
	 * now remove the page from the queues
	 */

	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		flags_to_clear |= PQ_ACTIVE;
		uvmexp.active--;
	}
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		flags_to_clear |= PQ_INACTIVE;
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
	if (pg->uanon) {
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		flags_to_clear |= PQ_ANON;
	}

	/*
	 * Clean page state bits.
	 */
	flags_to_clear |= PQ_AOBJ; /* XXX: find culprit */
	flags_to_clear |= PQ_ENCRYPT|PG_ZERO|PG_FAKE|PG_BUSY|PG_RELEASED|
	    PG_CLEAN|PG_CLEANCHK;
	atomic_clearbits_int(&pg->pg_flags, flags_to_clear);

	/*
	 * and put on free queue
	 */

#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif

	uvm_pmr_freepages(pg, 1);

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;
}

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are anon-owned, anons must have 0 refcount.
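 * => PG_WANTED waiters are woken; PG_RELEASED pages are freed (object
 *    pages) or have their anon freed, otherwise PG_BUSY is just cleared.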
 */

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int i;

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->pg_flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->pg_flags & PG_RELEASED) {
			uobj = pg->uobject;
			if (uobj != NULL) {
				uvm_lock_pageq();
				pmap_page_protect(pg, VM_PROT_NONE);
				/* XXX won't happen right now */
				if (pg->pg_flags & PQ_AOBJ)
					uao_dropswap(uobj,
					    pg->offset >> PAGE_SHIFT);
				uvm_pagefree(pg);
				uvm_unlock_pageq();
			} else {
				atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				uvm_anfree(pg->uanon);
			}
		} else {
			atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(struct vm_page *pg, char *tag)
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid :  (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of an non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => we do at least one iteration per call, if we are below the target.
 * => we loop until we either reach the target or whichqs indicates that
 *	there is a process ready to run.
 */
void
uvm_pageidlezero(void)
{
#if 0 /* disabled: need new code */
	struct vm_page *pg;
	struct pgfreelist *pgfl;
	int free_list;

	do {
		uvm_lock_fpageq();

		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			pgfl = &uvm.page_free[free_list];
			if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
			    PGFL_UNKNOWN])) != NULL)
				break;
		}

		if (pg == NULL) {
			/*
			 * No non-zero'd pages; don't bother trying again
			 * until we know we have non-zero'd pages free.
			 */
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
		uvmexp.free--;
		uvm_unlock_fpageq();

#ifdef PMAP_PAGEIDLEZERO
		if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
			/*
			 * The machine-dependent code detected some
			 * reason for us to abort zeroing pages,
			 * probably because there is a process now
			 * ready to run.
			 */
			uvm_lock_fpageq();
			TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
			    pg, pageq);
			uvmexp.free++;
			uvmexp.zeroaborts++;
			uvm_unlock_fpageq();
			return;
		}
#else
		/*
		 * XXX This will toast the cache unless the pmap_zero_page()
		 * XXX implementation does uncached access.
		 */
		pmap_zero_page(pg);
#endif
		atomic_setbits_int(&pg->pg_flags, PG_ZERO);

		uvm_lock_fpageq();
		TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
		uvmexp.free++;
		uvmexp.zeropages++;
		uvm_unlock_fpageq();
	} while (curcpu_is_idle());
#endif /* 0 */
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX > 1
/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
int
vm_physseg_find(paddr_t pframe, int *offp)
{
	struct vm_physseg *seg;

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	/* binary search for it */
	int	start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */
		seg = vm_physmem + try;

		/* start past our try? */
		if (pframe >= seg->start) {
			/* was try correct? */
			if (pframe < seg->end) {
				if (offp)
					*offp = pframe - seg->start;
				return(try);            /* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);

#else
	/* linear search for it */
	int	lcv;

	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
		if (pframe >= seg->start && pframe < seg->end) {
			if (offp)
				*offp = pframe - seg->start;
			return(lcv);		   /* got it */
		}
	}
	return(-1);

#endif
}

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);

	return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
}
#endif /* VM_PHYSSEG_MAX > 1 */

/*
 * uvm_pagelookup: look up a page
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
	/* XXX if stack is too much, handroll */
	struct vm_page pg;

	pg.offset = off;
	return (RB_FIND(uvm_objtree, &obj->memt, &pg));
}

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */
void
uvm_pagewire(struct vm_page *pg)
{
	if (pg->wire_count == 0) {
		if (pg->pg_flags & PQ_ACTIVE) {
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active--;
		}
		if (pg->pg_flags & PQ_INACTIVE) {
			if (pg->pg_flags & PQ_SWAPBACKED)
				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
			else
				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
			uvmexp.inactive--;
		}
		uvmexp.wired++;
	}
	pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */
void
uvm_pageunwire(struct vm_page *pg)
{
	pg->wire_count--;
	if (pg->wire_count == 0) {
		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
		uvmexp.active++;
		atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.wired--;
	}
}

/*
 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 */
void
uvm_pagedeactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if ((pg->pg_flags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
		atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive++;
		pmap_clear_reference(pg);
		/*
		 * update the "clean" bit.  this isn't 100%
		 * accurate, and doesn't have to be.  we'll
		 * re-sync it after we zap all mappings when
		 * scanning the inactive list.
		 */
		if ((pg->pg_flags & PG_CLEAN) != 0 &&
		    pmap_is_modified(pg))
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	}
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */
void
uvm_pageactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}
	if (pg->wire_count == 0) {

		/*
		 * if page is already active, remove it from list so we
		 * can put it at tail.  if it wasn't active, then mark
		 * it active and bump active count
		 */
		if (pg->pg_flags & PQ_ACTIVE)
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		else {
			atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active++;
		}

		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
	}
}

/*
 * uvm_pagezero: zero fill a page
 */
void
uvm_pagezero(struct vm_page *pg)
{
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	pmap_zero_page(pg);
}

/*
 * uvm_pagecopy: copy a page
 */
void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{
	atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
	pmap_copy_page(src, dst);
}

/*
 * uvm_pagecount: count the number of physical pages in the address range.
 */
psize_t
uvm_pagecount(struct uvm_constraint_range* constraint)
{
	int lcv;
	psize_t sz;
	paddr_t low, high;
	paddr_t ps_low, ps_high;

	/* Algorithm uses page numbers. */
	low = atop(constraint->ucr_low);
	high = atop(constraint->ucr_high);

	sz = 0;
	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		ps_low = MAX(low, vm_physmem[lcv].avail_start);
		ps_high = MIN(high, vm_physmem[lcv].avail_end);
		if (ps_low < ps_high)
			sz += ps_high - ps_low;
	}
	return sz;
}