/*	$OpenBSD: uvm_page.c,v 1.92 2009/07/22 21:05:37 oga Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once the allocator is ready.
 * we statically allocate the one bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

/*
 * History
 */
UVMHIST_DECL(pghist);

/*
 * local prototypes
 */

static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(struct vm_page *pg)
{
	struct pglist *buck;
	UVMHIST_FUNC("uvm_pageinsert"); UVMHIST_CALLED(pghist);

	KASSERT((pg->pg_flags & PG_TABLED) == 0);
	mtx_enter(&uvm.hashlock);
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
	mtx_leave(&uvm.hashlock);

	TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(struct vm_page *pg)
{
	struct pglist *buck;
	UVMHIST_FUNC("uvm_pageremove"); UVMHIST_CALLED(pghist);

	KASSERT(pg->pg_flags & PG_TABLED);
	mtx_enter(&uvm.hashlock);
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
	TAILQ_REMOVE(buck, pg, hashq);
	mtx_leave(&uvm.hashlock);

#ifdef UBC
	if (pg->uobject->pgops == &uvm_vnodeops) {
		uvm_pgcnt_vnode--;
	}
#endif

	/* object should be locked */
	TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

	atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->pg_version++;
}
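
/*
 * note: the bucket index used above comes from the uvm_pagehash()
 * macro (see uvm_page.h).  the idea, roughly, is to fold the
 * <obj,offset> pair and mask it down to a bucket:
 *
 *	hash = ((u_long)obj + (u_long)atop(off)) & uvm.page_hashmask;
 *
 * since page_hashmask is "number of buckets - 1", this only works
 * while the bucket count is a power of two; uvm_page_rehash() below
 * maintains that invariant.
 */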

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray;
	int lcv, i;
	paddr_t paddr;
#if defined(UVMHIST)
	static struct uvm_history_ent pghistbuf[100];
#endif

	UVMHIST_FUNC("uvm_page_init");
	UVMHIST_INIT_STATIC(pghist, pghistbuf);
	UVMHIST_CALLED(pghist);

	/*
	 * init the page queues and page queue locks
	 */

	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		for (i = 0; i < PGFL_NQUEUES; i++)
			TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]);
	}
	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	mtx_init(&uvm.fpageqlock, IPL_VM);

	/*
	 * init the <obj,offset> => <page> hash table.  for now
	 * we just have one bucket (the bootstrap bucket).  later on we
	 * will allocate new buckets as we dynamically resize the hash table.
	 */

	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;			/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	mtx_init(&uvm.hashlock, IPL_VM);	/* init hash table lock */

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.  our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_init: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %ld page(s) in init\n",
			    (long)(n - pagecount));
			panic("uvm_page_init"); /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
#endif
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init locks for kernel threads
	 */
	mtx_init(&uvm.aiodoned_lock, IPL_BIO);

	/*
	 * init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}
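
/*
 * for reference, a typical machine-dependent bootstrap drives the
 * functions in this file roughly as follows (a sketch only; the
 * segment variables are hypothetical and per-port):
 *
 *	uvm_setpagesize();
 *	uvm_page_physload(atop(seg_start), atop(seg_end),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 *	...			(repeat per segment of RAM)
 *	uvm_page_init(&kvm_start, &kvm_end);
 */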

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize(void)
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and map in physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
static boolean_t uvm_page_physget_freelist(paddr_t *, int);

static boolean_t
uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
{
	int lcv, x;
	UVMHIST_FUNC("uvm_page_physget_freelist"); UVMHIST_CALLED(pghist);

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		if (vm_physmem[lcv].free_list != freelist)
			continue;

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass 2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}

boolean_t
uvm_page_physget(paddr_t *paddrp)
{
	int i;
	UVMHIST_FUNC("uvm_page_physget"); UVMHIST_CALLED(pghist);

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == TRUE)
			return (TRUE);
	return (FALSE);
}
#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int free_list)
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);

	if (start >= end)
		panic("uvm_page_physload: start >= end");

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;
		npages = end - start;  /* # of pages */
		pgs = (struct vm_page *)uvm_km_zalloc(kernel_map,
		    sizeof(struct vm_page) * npages);
		if (pgs == NULL) {
			printf("uvm_page_physload: cannot malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* init phys_addr and free_list, and free pages */
		for (lcv = 0, paddr = ptoa(start) ;
		    lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	if (!preload)
		uvm_page_rehash();

	return;
}

/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */

void
uvm_page_rehash(void)
{
	int freepages, lcv, bucketcount, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;
	size_t newsize, oldsize;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;

	/*
	 * compute the size of the current table and new table.
	 */

	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	oldsize = round_page(sizeof(struct pglist) * oldcount);
	newsize = round_page(sizeof(struct pglist) * bucketcount);

	/*
	 * allocate the new buckets
	 */

	newbuckets = (struct pglist *) uvm_km_alloc(kernel_map, newsize);
	if (newbuckets == NULL) {
		printf("uvm_page_rehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0 ; lcv < bucketcount ; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	mtx_enter(&uvm.hashlock);
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;  /* power of 2 */

	/* ... and rehash */
	for (lcv = 0 ; lcv < oldcount ; lcv++) {
		while ((pg = TAILQ_FIRST(&oldbuckets[lcv])) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
			TAILQ_INSERT_TAIL(
			    &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			    pg, hashq);
		}
	}
	mtx_leave(&uvm.hashlock);

	/*
	 * free old bucket array if it is not the boot-time table
	 */

	if (oldbuckets != &uvm_bootbucket)
		uvm_km_free(kernel_map, (vaddr_t) oldbuckets, oldsize);

	/*
	 * done
	 */
	return;
}


#ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump(void); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump(void)
{
	int lcv;

	printf("rehash: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
		    (long long)vm_physmem[lcv].start,
		    (long long)vm_physmem[lcv].end,
		    (long long)vm_physmem[lcv].avail_start,
		    (long long)vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
	printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

void
uvm_shutdown(void)
{
#ifdef UVM_SWAP_ENCRYPT
	uvm_swap_finicrypt_all();
#endif
}

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 * => policy decision: it is more important to pull a page off of the
 *	appropriate priority free list than it is to get a zero'd or
 *	unknown contents page.  This is because we live with the
 *	consequences of a bad free list decision for the entire
 *	lifetime of the page, e.g. if the page comes from memory that
 *	is slower to access.
 */

struct vm_page *
uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags, int strat, int free_list)
{
	int lcv, try1, try2, zeroit = 0;
	struct vm_page *pg;
	struct pglist *freeq;
	struct pgfreelist *pgfl;
	boolean_t use_reserve;
	UVMHIST_FUNC("uvm_pagealloc_strat"); UVMHIST_CALLED(pghist);

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));

	uvm_lock_fpageq();

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	    !((curproc == uvm.pagedaemon_proc) ||
	    (curproc == syncerproc))))
		goto fail;

#if PGFL_NQUEUES != 2
#error uvm_pagealloc_strat needs to be updated
#endif

	/*
	 * If we want a zero'd page, try the ZEROS queue first, otherwise
	 * we try the UNKNOWN queue first.
	 */
	if (flags & UVM_PGA_ZERO) {
		try1 = PGFL_ZEROS;
		try2 = PGFL_UNKNOWN;
	} else {
		try1 = PGFL_UNKNOWN;
		try2 = PGFL_ZEROS;
	}

	UVMHIST_LOG(pghist, "obj=%p off=%lx anon=%p flags=%lx",
	    obj, (u_long)off, anon, flags);
	UVMHIST_LOG(pghist, "strat=%ld free_list=%ld", strat, free_list, 0, 0);
 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check all freelists in descending priority order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			pgfl = &uvm.page_free[lcv];
			if ((pg = TAILQ_FIRST((freeq =
			    &pgfl->pgfl_queues[try1]))) != NULL ||
			    (pg = TAILQ_FIRST((freeq =
			    &pgfl->pgfl_queues[try2]))) != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
		pgfl = &uvm.page_free[free_list];
		if ((pg = TAILQ_FIRST((freeq =
		    &pgfl->pgfl_queues[try1]))) != NULL ||
		    (pg = TAILQ_FIRST((freeq =
		    &pgfl->pgfl_queues[try2]))) != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	TAILQ_REMOVE(freeq, pg, pageq);
	uvmexp.free--;

	/* update zero'd page count */
	if (pg->pg_flags & PG_ZERO)
		uvmexp.zeropages--;

	/*
	 * update allocation statistics and remember if we have to
	 * zero the page
	 */
	if (flags & UVM_PGA_ZERO) {
		if (pg->pg_flags & PG_ZERO) {
			uvmexp.pga_zerohit++;
			zeroit = 0;
		} else {
			uvmexp.pga_zeromiss++;
			zeroit = 1;
		}
	}

	uvm_unlock_fpageq();		/* unlock free page queue */

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->pg_flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	pg->pg_version++;
	if (anon) {
		anon->an_page = pg;
		atomic_setbits_int(&pg->pg_flags, PQ_ANON);
#ifdef UBC
		uvm_pgcnt_anon++;
#endif
	} else {
		if (obj)
			uvm_pageinsert(pg);
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	if (flags & UVM_PGA_ZERO) {
		/*
		 * A zero'd page is not clean.  If we got a page not already
		 * zero'd, then we have to zero it ourselves.
		 */
		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		if (zeroit)
			pmap_zero_page(pg);
	}

	UVMHIST_LOG(pghist, "allocated pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);
	return(pg);

 fail:
	uvm_unlock_fpageq();
	UVMHIST_LOG(pghist, "failed!", 0, 0, 0, 0);
	return (NULL);
}
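
/*
 * note: most callers do not use this function directly but go through
 * the uvm_pagealloc() wrapper (see uvm_page.h), which is roughly:
 *
 *	#define uvm_pagealloc(obj, off, anon, flags) \
 *	    uvm_pagealloc_strat((obj), (off), (anon), (flags), \
 *		UVM_PGA_STRAT_NORMAL, 0)
 *
 * callers that can sleep typically retry around uvm_wait() when NULL
 * is returned.
 */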

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
{

	UVMHIST_FUNC("uvm_pagerealloc"); UVMHIST_CALLED(pghist);

	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->pg_version++;
		uvm_pageinsert(pg);
	}
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(struct vm_page *pg)
{
	int saved_loan_count = pg->loan_count;
	UVMHIST_FUNC("uvm_pagefree"); UVMHIST_CALLED(pghist);

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p", pg);
	}
#endif

	UVMHIST_LOG(pghist, "freeing pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->pg_flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.  the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		/* in case an anon takes over */
		if (saved_loan_count)
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */

		if (saved_loan_count)
			return;
	} else if (saved_loan_count && pg->uanon) {
		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		return;
	}
	KASSERT(saved_loan_count == 0);

	/*
	 * now remove the page from the queues
	 */

	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
	if (pg->uanon) {
		pg->uanon->an_page = NULL;
#ifdef UBC
		uvm_pgcnt_anon--;
#endif
	}

	/*
	 * and put on free queue
	 */

	atomic_clearbits_int(&pg->pg_flags, PG_ZERO);

	uvm_lock_fpageq();
	TAILQ_INSERT_TAIL(&uvm.page_free[
	    uvm_page_lookup_freelist(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
	atomic_clearbits_int(&pg->pg_flags, PQ_MASK);
	atomic_setbits_int(&pg->pg_flags, PQ_FREE);
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	uvmexp.free++;

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;

	uvm_unlock_fpageq();
}

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
 */

void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int i;
	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(pdhist);

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->pg_flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->pg_flags & PG_RELEASED) {
			UVMHIST_LOG(pdhist, "releasing pg %p", pg,0,0,0);
			uobj = pg->uobject;
			if (uobj != NULL) {
				uvm_lock_pageq();
				pmap_page_protect(pg, VM_PROT_NONE);
				/* XXX won't happen right now */
				if (pg->pg_flags & PQ_ANON)
					uao_dropswap(uobj,
					    pg->offset >> PAGE_SHIFT);
				uvm_pagefree(pg);
				uvm_unlock_pageq();
			} else {
				atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				uvm_anfree(pg->uanon);
			}
		} else {
			UVMHIST_LOG(pdhist, "unbusying pg %p", pg,0,0,0);
			atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.  it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(struct vm_page *pg, char *tag)
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => we do at least one iteration per call, if we are below the target.
 * => we loop until we either reach the target or whichqs indicates that
 *	there is a process ready to run.
 */
void
uvm_pageidlezero(void)
{
	struct vm_page *pg;
	struct pgfreelist *pgfl;
	int free_list;
	UVMHIST_FUNC("uvm_pageidlezero"); UVMHIST_CALLED(pghist);

	do {
		uvm_lock_fpageq();

		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			pgfl = &uvm.page_free[free_list];
			if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
			    PGFL_UNKNOWN])) != NULL)
				break;
		}

		if (pg == NULL) {
			/*
			 * No non-zero'd pages; don't bother trying again
			 * until we know we have non-zero'd pages free.
			 */
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
		uvmexp.free--;
		uvm_unlock_fpageq();

#ifdef PMAP_PAGEIDLEZERO
		if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
			/*
			 * The machine-dependent code detected some
			 * reason for us to abort zeroing pages,
			 * probably because there is a process now
			 * ready to run.
			 */
			uvm_lock_fpageq();
			TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
			    pg, pageq);
			uvmexp.free++;
			uvmexp.zeroaborts++;
			uvm_unlock_fpageq();
			return;
		}
#else
		/*
		 * XXX This will toast the cache unless the pmap_zero_page()
		 * XXX implementation does uncached access.
		 */
		pmap_zero_page(pg);
#endif
		atomic_setbits_int(&pg->pg_flags, PG_ZERO);

		uvm_lock_fpageq();
		TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
		uvmexp.free++;
		uvmexp.zeropages++;
		uvm_unlock_fpageq();
	} while (curcpu_is_idle());
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX > 1
/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
int
vm_physseg_find(paddr_t pframe, int *offp)
{

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	/* binary search for it */
	int	start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= vm_physmem[try].start) {
			/* was try correct? */
			if (pframe < vm_physmem[try].end) {
				if (offp)
					*offp = pframe - vm_physmem[try].start;
				return(try);            /* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);

#else
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		if (pframe >= vm_physmem[lcv].start &&
		    pframe < vm_physmem[lcv].end) {
			if (offp)
				*offp = pframe - vm_physmem[lcv].start;
			return(lcv);		   /* got it */
		}
	}
	return(-1);

#endif
}

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);

	return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
}
#endif /* VM_PHYSSEG_MAX > 1 */

/*
 * uvm_pagelookup: look up a page
 *
 * => caller should lock object to keep someone from pulling the page
 *	out from under it
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
	struct vm_page *pg;
	struct pglist *buck;

	mtx_enter(&uvm.hashlock);
	buck = &uvm.page_hash[uvm_pagehash(obj, off)];

	TAILQ_FOREACH(pg, buck, hashq) {
		if (pg->uobject == obj && pg->offset == off) {
			break;
		}
	}
	mtx_leave(&uvm.hashlock);
	return(pg);
}

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */
void
uvm_pagewire(struct vm_page *pg)
{
	if (pg->wire_count == 0) {
		if (pg->pg_flags & PQ_ACTIVE) {
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active--;
		}
		if (pg->pg_flags & PQ_INACTIVE) {
			if (pg->pg_flags & PQ_SWAPBACKED)
				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
			else
				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
			uvmexp.inactive--;
		}
		uvmexp.wired++;
	}
	pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */
void
uvm_pageunwire(struct vm_page *pg)
{
	pg->wire_count--;
	if (pg->wire_count == 0) {
		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
		uvmexp.active++;
		atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.wired--;
	}
}

/*
 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 */
void
uvm_pagedeactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if ((pg->pg_flags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
		atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive++;
		pmap_clear_reference(pg);
		/*
		 * update the "clean" bit.  this isn't 100%
		 * accurate, and doesn't have to be.  we'll
		 * re-sync it after we zap all mappings when
		 * scanning the inactive list.
		 */
		if ((pg->pg_flags & PG_CLEAN) != 0 &&
		    pmap_is_modified(pg))
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	}
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */
void
uvm_pageactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}
	if (pg->wire_count == 0) {

		/*
		 * if page is already active, remove it from list so we
		 * can put it at tail.  if it wasn't active, then mark
		 * it active and bump active count
		 */
		if (pg->pg_flags & PQ_ACTIVE)
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		else {
			atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active++;
		}

		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
	}
}

/*
 * uvm_pagezero: zero fill a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagezero(struct vm_page *pg)
{
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	pmap_zero_page(pg);
}

/*
 * uvm_pagecopy: copy a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */
void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{
	atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
	pmap_copy_page(src, dst);
}

/*
 * uvm_page_lookup_freelist: look up the free list for the specified page
 */
int
uvm_page_lookup_freelist(struct vm_page *pg)
{
#if VM_PHYSSEG_MAX == 1
	return (vm_physmem[0].free_list);
#else
	int	lcv;

	lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
	KASSERT(lcv != -1);
	return (vm_physmem[lcv].free_list);
#endif
}