/*	$OpenBSD: uvm_page.c,v 1.81 2009/06/01 17:42:33 ariane Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once the allocator is ready.
 * we statically allocate the one bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

/*
 * History
 */
UVMHIST_DECL(pghist);

/*
 * local prototypes
 */

static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(struct vm_page *pg)
{
	struct pglist *buck;
	UVMHIST_FUNC("uvm_pageinsert"); UVMHIST_CALLED(pghist);

	KASSERT((pg->pg_flags & PG_TABLED) == 0);
	mtx_enter(&uvm.hashlock);
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
	TAILQ_INSERT_TAIL(buck, pg, fq.queues.hashq);	/* put in hash */
	mtx_leave(&uvm.hashlock);

	TAILQ_INSERT_TAIL(&pg->uobject->memq, pg,
	    fq.queues.listq);	/* put in object */
	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(struct vm_page *pg)
{
	struct pglist *buck;
	UVMHIST_FUNC("uvm_pageremove"); UVMHIST_CALLED(pghist);

	KASSERT(pg->pg_flags & PG_TABLED);
	mtx_enter(&uvm.hashlock);
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
	TAILQ_REMOVE(buck, pg, fq.queues.hashq);
	mtx_leave(&uvm.hashlock);

#ifdef UBC
	if (pg->uobject->pgops == &uvm_vnodeops) {
		uvm_pgcnt_vnode--;
	}
#endif

	/* object should be locked */
	TAILQ_REMOVE(&pg->uobject->memq, pg, fq.queues.listq);

	atomic_clearbits_int(&pg->pg_flags, PG_TABLED|PQ_AOBJ);
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->pg_version++;
}
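/*
 * A minimal sketch of how the bucket scheme above is consumed; this
 * mirrors uvm_pagelookup() near the end of this file and introduces no
 * new mechanism:
 *
 *	struct pglist *buck;
 *	struct vm_page *pg;
 *
 *	mtx_enter(&uvm.hashlock);
 *	buck = &uvm.page_hash[uvm_pagehash(obj, off)];
 *	TAILQ_FOREACH(pg, buck, fq.queues.hashq)
 *		if (pg->uobject == obj && pg->offset == off)
 *			break;
 *	mtx_leave(&uvm.hashlock);
 *
 * uvm_pagehash() selects a bucket by masking with uvm.page_hashmask,
 * which is why the bucket count is kept at a power of two (see
 * uvm_page_rehash() below).
 */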
/*
 * uvm_page_init: init the page system.  called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray;
	int lcv, i;
	paddr_t paddr;
#if defined(UVMHIST)
	static struct uvm_history_ent pghistbuf[100];
#endif

	UVMHIST_FUNC("uvm_page_init");
	UVMHIST_INIT_STATIC(pghist, pghistbuf);
	UVMHIST_CALLED(pghist);

	/*
	 * init the page queues and page queue locks
	 */

	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	mtx_init(&uvm.fpageqlock, IPL_VM);
	uvm_pmr_init();

	/*
	 * init the <obj,offset> => <page> hash table.  for now
	 * we just have one bucket (the bootstrap bucket).  later on we
	 * will allocate new buckets as we dynamically resize the hash table.
	 */

	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;			/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	mtx_init(&uvm.hashlock, IPL_VM);	/* init hash table lock */

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.  our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_init: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.  for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by (PAGE_SIZE plus the size of the vm_page
	 * structure).  we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %ld page(s) in init\n",
			    (long)(n - pagecount));
			panic("uvm_page_init");  /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
#endif
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
			}
		}

		/* add pages to free pool */
		uvm_pmr_freepages(&vm_physmem[lcv].pgs[
		    vm_physmem[lcv].avail_start - vm_physmem[lcv].start],
		    vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init locks for kernel threads
	 */
	mtx_init(&uvm.aiodoned_lock, IPL_BIO);

	/*
	 * init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}
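/*
 * the pagecount computation above is just the solution to
 *
 *	pagecount * (PAGE_SIZE + sizeof(struct vm_page))
 *	    <= (freepages + 1) * PAGE_SIZE
 *
 * i.e. every page we keep must pay for its own PAGE_SIZE bytes of RAM
 * plus the vm_page that describes it.  as an illustration only (the
 * real sizes are MD): with 4096-byte pages and a hypothetical 128-byte
 * struct vm_page, roughly 128 / (4096 + 128), or about 3%, of the RAM
 * registered by the MD code would go to the vm_page array.
 */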
/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize(void)
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}
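/*
 * uvm_page_init() above is the canonical caller of uvm_pageboot_alloc():
 * it steals the space for the whole vm_page array in one call, e.g.
 *
 *	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
 *	    sizeof(struct vm_page));
 *
 * memory obtained this way is mapped with pmap_kenter_pa() and is not
 * managed afterwards: it never appears on the free lists and is never
 * handed back through uvm_pagefree().
 */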
#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.  if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
boolean_t uvm_page_physget_freelist(paddr_t *, int);

boolean_t
uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
{
	int lcv, x;
	UVMHIST_FUNC("uvm_page_physget_freelist"); UVMHIST_CALLED(pghist);

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		if (vm_physmem[lcv].free_list != freelist)
			continue;

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass 2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}
boolean_t
uvm_page_physget(paddr_t *paddrp)
{
	int i;
	UVMHIST_FUNC("uvm_page_physget"); UVMHIST_CALLED(pghist);

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == TRUE)
			return (TRUE);
	return (FALSE);
}
#endif /* PMAP_STEAL_MEMORY */
/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int free_list)
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);

	if (start >= end)
		panic("uvm_page_physload: start >= end");

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;
		npages = end - start;  /* # of pages */
		pgs = (struct vm_page *)uvm_km_zalloc(kernel_map,
		    sizeof(struct vm_page) * npages);
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* init phys_addr and free_list, and free pages */
		for (lcv = 0, paddr = ptoa(start) ;
		    lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	if (!preload)
		uvm_page_rehash();

	return;
}
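/*
 * A sketch of the expected boot-time call, with made-up addresses: MD
 * startup code registers each chunk of RAM (all arguments are page
 * frame numbers) before uvm_page_init() runs, e.g.
 *
 *	uvm_page_physload(atop(0x00100000), atop(0x40000000),
 *	    atop(first_avail), atop(0x40000000), VM_FREELIST_DEFAULT);
 *
 * where first_avail is whatever the MD code computed.  start/end bound
 * the pages that get vm_page structures, while avail_start/avail_end
 * bound the subset actually put in the free pool (the rest is typically
 * RAM already in use, such as kernel text and data).
 */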
/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */

void
uvm_page_rehash(void)
{
	int freepages, lcv, bucketcount, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;
	size_t newsize, oldsize;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;

	/*
	 * compute the size of the current table and new table.
	 */

	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	oldsize = round_page(sizeof(struct pglist) * oldcount);
	newsize = round_page(sizeof(struct pglist) * bucketcount);

	/*
	 * allocate the new buckets
	 */

	newbuckets = (struct pglist *) uvm_km_alloc(kernel_map, newsize);
	if (newbuckets == NULL) {
		printf("uvm_page_rehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0 ; lcv < bucketcount ; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	mtx_enter(&uvm.hashlock);
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;  /* power of 2 */

	/* ... and rehash */
	for (lcv = 0 ; lcv < oldcount ; lcv++) {
		while ((pg = TAILQ_FIRST(&oldbuckets[lcv])) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, fq.queues.hashq);
			TAILQ_INSERT_TAIL(
			    &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			    pg, fq.queues.hashq);
		}
	}
	mtx_leave(&uvm.hashlock);

	/*
	 * free old bucket array if it is not the boot-time table
	 */

	if (oldbuckets != &uvm_bootbucket)
		uvm_km_free(kernel_map, (vaddr_t) oldbuckets, oldsize);

	/*
	 * done
	 */
	return;
}
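/*
 * The doubling loop above sizes the table to the smallest power of two
 * that is >= freepages (roughly one bucket per free page), so that
 * uvm_pagehash() can select a bucket with a simple mask rather than a
 * modulus:
 *
 *	bucket = hashed(obj, off) & uvm.page_hashmask;
 *
 * this is only correct because uvm.page_hashmask is kept equal to
 * uvm.page_nhash - 1 with page_nhash a power of two.
 */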
#ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump(void); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump(void)
{
	int lcv;

	printf("rehash: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
		    (long long)vm_physmem[lcv].start,
		    (long long)vm_physmem[lcv].end,
		    (long long)vm_physmem[lcv].avail_start,
		    (long long)vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
	printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

void
uvm_shutdown(void)
{
#ifdef UVM_SWAP_ENCRYPT
	uvm_swap_finicrypt_all();
#endif
}

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 * => policy decision: it is more important to pull a page off of the
 *    appropriate priority free list than it is to get a zero'd or
 *    unknown contents page.  This is because we live with the
 *    consequences of a bad free list decision for the entire
 *    lifetime of the page, e.g. if the page comes from memory that
 *    is slower to access.
 */

struct vm_page *
uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags, int strat, int free_list)
{
	struct pglist pgl;
	int pmr_flags;
	struct vm_page *pg;
	boolean_t use_reserve;
	UVMHIST_FUNC("uvm_pagealloc_strat"); UVMHIST_CALLED(pghist);

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon_proc);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *      the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *      the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	    !((curproc == uvm.pagedaemon_proc) ||
	    (curproc == syncerproc))))
		goto fail;

	pmr_flags = UVM_PLA_NOWAIT;
	if (flags & UVM_PGA_ZERO)
		pmr_flags |= UVM_PLA_ZERO;
	TAILQ_INIT(&pgl);
	if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0)
		goto fail;
	pg = TAILQ_FIRST(&pgl);
	KASSERT(pg != NULL);
	KASSERT(TAILQ_NEXT(pg, pageq) == NULL);

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->pg_flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	if (anon) {
		anon->an_page = pg;
		atomic_setbits_int(&pg->pg_flags, PQ_ANON);
#ifdef UBC
		uvm_pgcnt_anon++;
#endif
	} else {
		if (obj)
			uvm_pageinsert(pg);
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	UVMHIST_LOG(pghist, "allocated pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);
	return(pg);

 fail:
	UVMHIST_LOG(pghist, "failed!", 0, 0, 0, 0);
	return (NULL);
}
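/*
 * Most callers do not pick a strategy by hand: the usual entry point is
 * the uvm_pagealloc() wrapper (see uvm_page.h), which is roughly
 * equivalent to
 *
 *	pg = uvm_pagealloc_strat(obj, off, anon, flags,
 *	    UVM_PGA_STRAT_NORMAL, 0);
 *
 * in which case free_list is ignored and the allocator simply takes
 * whatever page uvm_pmr_getpages() hands back.
 */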
/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
{

	UVMHIST_FUNC("uvm_pagerealloc"); UVMHIST_CALLED(pghist);

	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->pg_version++;
		uvm_pageinsert(pg);
	}
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(struct vm_page *pg)
{
	struct pglist pgl;
	int saved_loan_count = pg->loan_count;
	UVMHIST_FUNC("uvm_pagefree"); UVMHIST_CALLED(pghist);

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p", pg);
	}
#endif

	UVMHIST_LOG(pghist, "freeing pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->pg_flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.  the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		/* in case an anon takes over */
		if (saved_loan_count)
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */

		if (saved_loan_count)
			return;
	} else if (saved_loan_count && pg->uanon) {
		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		return;
	}
	KASSERT(saved_loan_count == 0);

	/*
	 * now remove the page from the queues
	 */

	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
	if (pg->uanon) {
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
#ifdef UBC
		uvm_pgcnt_anon--;
#endif
	}

	/*
	 * Clean page state bits.
	 */
	atomic_clearbits_int(&pg->pg_flags,
	    PG_ZERO|PG_FAKE|PG_BUSY|PG_RELEASED|PG_CLEAN|PG_CLEANCHK);
	/*
	 * Pmap flag cleaning.
	 * XXX: Shouldn't pmap do this?
	 */
	atomic_clearbits_int(&pg->pg_flags,
	    PG_PMAP0|PG_PMAP1|PG_PMAP2|PG_PMAP3);

#if defined(DIAGNOSTIC)
	if (pg->pg_flags != 0) {
		panic("uvm_pagefree: expected page %p pg_flags to be 0\n"
		    "uvm_pagefree: instead of pg->pg_flags = %x\n",
		    VM_PAGE_TO_PHYS(pg), pg->pg_flags);
	}
#endif
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	TAILQ_INIT(&pgl);
	TAILQ_INSERT_HEAD(&pgl, pg, pageq);
	uvm_pmr_freepageq(&pgl);

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;
}
/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
 */

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int i;
	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(pdhist);

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->pg_flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->pg_flags & PG_RELEASED) {
			UVMHIST_LOG(pdhist, "releasing pg %p", pg,0,0,0);
			uobj = pg->uobject;
			if (uobj != NULL) {
				uobj->pgops->pgo_releasepg(pg, NULL);
			} else {
				atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				uvm_anfree(pg->uanon);
			}
		} else {
			UVMHIST_LOG(pdhist, "unbusying pg %p", pg,0,0,0);
			atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *    and where they do it.  it can be used to track down problems
 *    such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(struct vm_page *pg, char *tag)
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif
/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => we do at least one iteration per call, if we are below the target.
 * => we loop until we either reach the target or the cpu is no longer
 *    idle (i.e. there is a process ready to run).
 */
void
uvm_pageidlezero(void)
{
#if 0 /* Disabled for now. */
	struct vm_page *pg;
	struct pgfreelist *pgfl;
	int free_list;
	UVMHIST_FUNC("uvm_pageidlezero"); UVMHIST_CALLED(pghist);

	do {
		uvm_lock_fpageq();

		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			pgfl = &uvm.page_free[free_list];
			if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
			    PGFL_UNKNOWN])) != NULL)
				break;
		}

		if (pg == NULL) {
			/*
			 * No non-zero'd pages; don't bother trying again
			 * until we know we have non-zero'd pages free.
			 */
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
		uvmexp.free--;
		uvm_unlock_fpageq();

#ifdef PMAP_PAGEIDLEZERO
		if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
			/*
			 * The machine-dependent code detected some
			 * reason for us to abort zeroing pages,
			 * probably because there is a process now
			 * ready to run.
			 */
			uvm_lock_fpageq();
			TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
			    pg, pageq);
			uvmexp.free++;
			uvmexp.zeroaborts++;
			uvm_unlock_fpageq();
			return;
		}
#else
		/*
		 * XXX This will toast the cache unless the pmap_zero_page()
		 * XXX implementation does uncached access.
		 */
		pmap_zero_page(pg);
#endif
		atomic_setbits_int(&pg->pg_flags, PG_ZERO);

		uvm_lock_fpageq();
		TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
		uvmexp.free++;
		uvmexp.zeropages++;
		uvm_unlock_fpageq();
	} while (curcpu_is_idle());
#endif /* 0 */
}
/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX > 1
/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
int
vm_physseg_find(paddr_t pframe, int *offp)
{

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	/* binary search for it */
	int	start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).  this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *	(round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= vm_physmem[try].start) {
			/* was try correct? */
			if (pframe < vm_physmem[try].end) {
				if (offp)
					*offp = pframe - vm_physmem[try].start;
				return(try);		/* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);

#else
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		if (pframe >= vm_physmem[lcv].start &&
		    pframe < vm_physmem[lcv].end) {
			if (offp)
				*offp = pframe - vm_physmem[lcv].start;
			return(lcv);		/* got it */
		}
	}
	return(-1);

#endif
}
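/*
 * the "adjust" in the binary search above leans on the identity
 *
 *	round(len / 2) - 1 == trunc((len - 1) / 2)
 *
 * which holds for both parities: len = 2k gives k - 1 on each side,
 * and len = 2k + 1 gives (k + 1) - 1 == k on the left and
 * trunc(2k / 2) == k on the right.  so "start = try + 1; len--"
 * followed by the loop's len = len / 2 really does restrict the next
 * pass to the elements above try.
 */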
/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.  used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).  used in some MD code as well.
 */
struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);

	return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
}
#endif /* VM_PHYSSEG_MAX > 1 */

/*
 * uvm_pagelookup: look up a page
 *
 * => caller should lock object to keep someone from pulling the page
 *	out from under it
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
	struct vm_page *pg;
	struct pglist *buck;

	mtx_enter(&uvm.hashlock);
	buck = &uvm.page_hash[uvm_pagehash(obj, off)];

	TAILQ_FOREACH(pg, buck, fq.queues.hashq) {
		if (pg->uobject == obj && pg->offset == off) {
			break;
		}
	}
	mtx_leave(&uvm.hashlock);
	return(pg);
}

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */
void
uvm_pagewire(struct vm_page *pg)
{
	if (pg->wire_count == 0) {
		if (pg->pg_flags & PQ_ACTIVE) {
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active--;
		}
		if (pg->pg_flags & PQ_INACTIVE) {
			if (pg->pg_flags & PQ_SWAPBACKED)
				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
			else
				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
			uvmexp.inactive--;
		}
		uvmexp.wired++;
	}
	pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */
void
uvm_pageunwire(struct vm_page *pg)
{
	pg->wire_count--;
	if (pg->wire_count == 0) {
		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
		uvmexp.active++;
		atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.wired--;
	}
}

/*
 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 */
void
uvm_pagedeactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if ((pg->pg_flags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
		atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive++;
		pmap_clear_reference(pg);
		/*
		 * update the "clean" bit.  this isn't 100%
		 * accurate, and doesn't have to be.  we'll
		 * re-sync it after we zap all mappings when
		 * scanning the inactive list.
		 */
		if ((pg->pg_flags & PG_CLEAN) != 0 &&
		    pmap_is_modified(pg))
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	}
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */
void
uvm_pageactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}
	if (pg->wire_count == 0) {

		/*
		 * if page is already active, remove it from list so we
		 * can put it at tail.  if it wasn't active, then mark
		 * it active and bump active count
		 */
		if (pg->pg_flags & PQ_ACTIVE)
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		else {
			atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active++;
		}

		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
	}
}

/*
 * uvm_pagezero: zero fill a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagezero(struct vm_page *pg)
{
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	pmap_zero_page(pg);
}

/*
 * uvm_pagecopy: copy a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{
	atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
	pmap_copy_page(src, dst);
}

/*
 * uvm_page_lookup_freelist: look up the free list for the specified page
 */
int
uvm_page_lookup_freelist(struct vm_page *pg)
{
#if VM_PHYSSEG_MAX == 1
	return (vm_physmem[0].free_list);
#else
	int lcv;

	lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
	KASSERT(lcv != -1);
	return (vm_physmem[lcv].free_list);
#endif
}