uvm_page.c revision 1.45
/*	$OpenBSD: uvm_page.c,v 1.45 2002/09/12 12:56:16 art Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#define UVM_PAGE		/* pull in uvm_page.h functions */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t virtual_space_start;
static vaddr_t virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once the allocator is ready.
 * we statically allocate the one bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

/*
 * local prototypes
 */

static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

	KASSERT((pg->flags & PG_TABLED) == 0);
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	s = splvm();
	simple_lock(&uvm.hashlock);
	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
	simple_unlock(&uvm.hashlock);
	splx(s);

	TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
	pg->flags |= PG_TABLED;
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

	KASSERT(pg->flags & PG_TABLED);
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	s = splvm();
	simple_lock(&uvm.hashlock);
	TAILQ_REMOVE(buck, pg, hashq);
	simple_unlock(&uvm.hashlock);
	splx(s);

#ifdef UBC
	if (pg->uobject->pgops == &uvm_vnodeops) {
		uvm_pgcnt_vnode--;
	}
#endif

	/* object should be locked */
	TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

	pg->flags &= ~PG_TABLED;
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->version++;
}

/*
 * uvm_page_init: init the page system.  called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(kvm_startp, kvm_endp)
	vaddr_t *kvm_startp, *kvm_endp;
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray;
	int lcv, i;
	paddr_t paddr;

	/*
	 * init the page queues and page queue locks
	 */

	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		for (i = 0; i < PGFL_NQUEUES; i++)
			TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]);
	}
	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	simple_lock_init(&uvm.fpageqlock);

	/*
	 * init the <obj,offset> => <page> hash table.  for now
	 * we just have one bucket (the bootstrap bucket).  later on we
	 * will allocate new buckets as we dynamically resize the hash table.
	 */

	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;			/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	simple_lock_init(&uvm.hashlock);	/* init hash table lock */

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.  our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.  for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.  we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	pagecount = ((freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

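	/*
	 * Worked example (illustrative figures only -- sizeof(struct
	 * vm_page) varies by port): with 4K pages (PAGE_SHIFT == 12) and
	 * a 64 byte vm_page, 32768 pages of managed RAM give
	 *   pagecount = ((32768 + 1) << 12) / (4096 + 64) == 32264,
	 * i.e. about 1.5% of RAM ends up holding the vm_page array itself.
	 */
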
	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %ld page(s) in init\n",
			    (long)(n - pagecount));
			panic("uvm_page_init"); /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
#endif
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init locks for kernel threads
	 */

	simple_lock_init(&uvm.pagedaemon_lock);
	simple_lock_init(&uvm.aiodoned_lock);

	/*
	 * init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize()
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}

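/*
 * Example (illustrative): with the common uvmexp.pagesize of 4096 the
 * code above leaves pagemask == 0xfff and pageshift == 12; a size that
 * is not a power of two (say 12288) trips the panic instead.
 */
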
/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(size)
	vsize_t size;
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.  if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
static boolean_t uvm_page_physget_freelist(paddr_t *, int);

static boolean_t
uvm_page_physget_freelist(paddrp, freelist)
	paddr_t *paddrp;
	int freelist;
{
	int lcv, x;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
    (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		if (vm_physmem[lcv].free_list != freelist)
			continue;

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?  nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?  nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
    (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?  nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);		/* whoops! */
}

boolean_t
uvm_page_physget(paddrp)
	paddr_t *paddrp;
{
	int i;

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == TRUE)
			return (TRUE);
	return (FALSE);
}
#endif /* PMAP_STEAL_MEMORY */

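/*
 * Illustrative sketch: machine-dependent bootstrap code is expected to
 * hand its RAM to UVM with uvm_page_physload() (below) before
 * uvm_page_init() runs.  The identifiers in the example are
 * placeholders for whatever the port's pmap_bootstrap() really
 * computes; they are not defined anywhere in this file.
 */
#if 0	/* example only, never compiled */
	/* one segment: all of RAM, minus what the kernel image occupies */
	uvm_page_physload(atop(ram_start), atop(ram_end),
	    atop(kernel_end), atop(ram_end), VM_FREELIST_DEFAULT);
#endif
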
/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(start, end, avail_start, avail_end, free_list)
	paddr_t start, end, avail_start, avail_end;
	int free_list;
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d\n", free_list);

	if (start >= end)
		panic("uvm_page_physload: start >= end");

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;
		npages = end - start;  /* # of pages */
		pgs = (struct vm_page *)uvm_km_alloc(kernel_map,
		    sizeof(struct vm_page) * npages);
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* zero data, init phys_addr and free_list, and free pages */
		memset(pgs, 0, sizeof(struct vm_page) * npages);
		for (lcv = 0, paddr = ptoa(start) ;
		    lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	if (!preload)
		uvm_page_rehash();

	return;
}

/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */

void
uvm_page_rehash()
{
	int freepages, lcv, bucketcount, s, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;
	size_t newsize, oldsize;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;

	/*
	 * compute the size of the current table and new table.
	 */

	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	oldsize = round_page(sizeof(struct pglist) * oldcount);
	newsize = round_page(sizeof(struct pglist) * bucketcount);

	/*
	 * allocate the new buckets
	 */

	newbuckets = (struct pglist *) uvm_km_alloc(kernel_map, newsize);
	if (newbuckets == NULL) {
		printf("uvm_page_rehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0 ; lcv < bucketcount ; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	s = splvm();
	simple_lock(&uvm.hashlock);
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;  /* power of 2 */

	/* ... and rehash */
	for (lcv = 0 ; lcv < oldcount ; lcv++) {
		while ((pg = oldbuckets[lcv].tqh_first) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
			TAILQ_INSERT_TAIL(
			    &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			    pg, hashq);
		}
	}
	simple_unlock(&uvm.hashlock);
	splx(s);

	/*
	 * free old bucket array if it is not the boot-time table
	 */

	if (oldbuckets != &uvm_bootbucket)
		uvm_km_free(kernel_map, (vaddr_t) oldbuckets, oldsize);

	/*
	 * done
	 */
	return;
}

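/*
 * Sizing note for uvm_page_rehash() above (illustrative numbers): the
 * bucket count is the smallest power of two >= the number of freeable
 * pages, so e.g. 48000 such pages get 65536 buckets and a hashmask of
 * 0xffff -- roughly one bucket per page, which keeps the <obj,offset>
 * chains short at the cost of one struct pglist per bucket.
 */
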
#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump(void); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump()
{
	int lcv;

	printf("rehash: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
		    (long long)vm_physmem[lcv].start,
		    (long long)vm_physmem[lcv].end,
		    (long long)vm_physmem[lcv].avail_start,
		    (long long)vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
	printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 * => policy decision: it is more important to pull a page off of the
 *    appropriate priority free list than it is to get a zero'd or
 *    unknown contents page.  This is because we live with the
 *    consequences of a bad free list decision for the entire
 *    lifetime of the page, e.g. if the page comes from memory that
 *    is slower to access.
 */

struct vm_page *
uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
	struct uvm_object *obj;
	voff_t off;
	int flags;
	struct vm_anon *anon;
	int strat, free_list;
{
	int lcv, try1, try2, s, zeroit = 0;
	struct vm_page *pg;
	struct pglist *freeq;
	struct pgfreelist *pgfl;
	boolean_t use_reserve;

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));
	s = uvm_lock_fpageq();

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

#ifdef UBC
	if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
	    (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg)) {
		wakeup(&uvm.pagedaemon);
	}
#else
	if (uvmexp.free < uvmexp.freemin || (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);
#endif

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *      the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *      the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	    !(use_reserve && (curproc == uvm.pagedaemon_proc ||
	    curproc == syncerproc))))
		goto fail;

#if PGFL_NQUEUES != 2
#error uvm_pagealloc_strat needs to be updated
#endif

	/*
	 * If we want a zero'd page, try the ZEROS queue first, otherwise
	 * we try the UNKNOWN queue first.
	 */
	if (flags & UVM_PGA_ZERO) {
		try1 = PGFL_ZEROS;
		try2 = PGFL_UNKNOWN;
	} else {
		try1 = PGFL_UNKNOWN;
		try2 = PGFL_ZEROS;
	}

 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check all freelists in descending priority order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			pgfl = &uvm.page_free[lcv];
			if ((pg = TAILQ_FIRST((freeq =
			    &pgfl->pgfl_queues[try1]))) != NULL ||
			    (pg = TAILQ_FIRST((freeq =
			    &pgfl->pgfl_queues[try2]))) != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
		pgfl = &uvm.page_free[free_list];
		if ((pg = TAILQ_FIRST((freeq =
		    &pgfl->pgfl_queues[try1]))) != NULL ||
		    (pg = TAILQ_FIRST((freeq =
		    &pgfl->pgfl_queues[try2]))) != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	TAILQ_REMOVE(freeq, pg, pageq);
	uvmexp.free--;

	/* update zero'd page count */
	if (pg->flags & PG_ZERO)
		uvmexp.zeropages--;

	/*
	 * update allocation statistics and remember if we have to
	 * zero the page
	 */
	if (flags & UVM_PGA_ZERO) {
		if (pg->flags & PG_ZERO) {
			uvmexp.pga_zerohit++;
			zeroit = 0;
		} else {
			uvmexp.pga_zeromiss++;
			zeroit = 1;
		}
	}

	uvm_unlock_fpageq(s);		/* unlock free page queue */

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	pg->version++;
	if (anon) {
		anon->u.an_page = pg;
		pg->pqflags = PQ_ANON;
#ifdef UBC
		uvm_pgcnt_anon++;
#endif
	} else {
		if (obj)
			uvm_pageinsert(pg);
		pg->pqflags = 0;
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	if (flags & UVM_PGA_ZERO) {
		/*
		 * A zero'd page is not clean.  If we got a page not already
		 * zero'd, then we have to zero it ourselves.
		 */
		pg->flags &= ~PG_CLEAN;
		if (zeroit)
			pmap_zero_page(pg);
	}

	return(pg);

 fail:
	uvm_unlock_fpageq(s);
	return (NULL);
}

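/*
 * Usage sketch (illustrative): most callers go through the
 * uvm_pagealloc() wrapper from uvm_page.h, which is simply
 * uvm_pagealloc_strat() with UVM_PGA_STRAT_NORMAL and free list 0.
 * The page comes back PG_BUSY|PG_CLEAN|PG_FAKE; the caller owns the
 * busy bit and must clear it once the page is ready for use.
 */
#if 0	/* example only, never compiled */
	/* "uobj" is a locked uvm_object, "off" a page-aligned offset */
	pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO);
	if (pg == NULL) {
		/* no memory: unlock, let the pagedaemon work, then retry */
		simple_unlock(&uobj->vmobjlock);
		uvm_wait("pgalloc");
	}
#endif
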
/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(pg, newobj, newoff)
	struct vm_page *pg;
	struct uvm_object *newobj;
	voff_t newoff;
{
	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->version++;
		uvm_pageinsert(pg);
	}
}

/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(pg)
	struct vm_page *pg;
{
	int s;
	int saved_loan_count = pg->loan_count;

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p\n", pg);
	}
#endif

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.  the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		if (saved_loan_count)
			pg->flags &= ~PG_CLEAN;	/* in case an anon takes over */
		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */

		if (saved_loan_count)
			return;
	} else if (saved_loan_count && (pg->pqflags & PQ_ANON)) {

		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */

		pg->pqflags &= ~PQ_ANON;
		pg->uanon = NULL;
		return;
	}
	KASSERT(saved_loan_count == 0);

	/*
	 * now remove the page from the queues
	 */

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		pg->pqflags &= ~PQ_ACTIVE;
		uvmexp.active--;
	}
	if (pg->pqflags & PQ_INACTIVE) {
		if (pg->pqflags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		pg->pqflags &= ~PQ_INACTIVE;
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
#ifdef UBC
	if (pg->uanon) {
		uvm_pgcnt_anon--;
	}
#endif

	/*
	 * and put on free queue
	 */

	pg->flags &= ~PG_ZERO;

	s = uvm_lock_fpageq();
	TAILQ_INSERT_TAIL(&uvm.page_free[
	    uvm_page_lookup_freelist(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
	pg->pqflags = PQ_FREE;
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	uvmexp.free++;

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;

	uvm_unlock_fpageq(s);
}

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
 */

void
uvm_page_unbusy(pgs, npgs)
	struct vm_page **pgs;
	int npgs;
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int i;
	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];

		if (pg == NULL) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->flags & PG_RELEASED) {
			UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0);
			uobj = pg->uobject;
			if (uobj != NULL) {
				uobj->pgops->pgo_releasepg(pg, NULL);
			} else {
				pg->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				uvm_anfree(pg->uanon);
			}
		} else {
			UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0);
			pg->flags &= ~(PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

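/*
 * The UVM_PAGE_OWN() calls sprinkled through this file are macros from
 * uvm_page.h: they compile to nothing unless the kernel is built with
 * the UVM_PAGE_TRKOWN debugging option, in which case they call
 * uvm_page_own() below to record which proc set PG_BUSY and why.
 */
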
#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *    and where they do it.  it can be used to track down problems
 *    such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(pg, tag)
	struct vm_page *pg;
	char *tag;
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => we do at least one iteration per call, if we are below the target.
 * => we loop until we either reach the target or whichqs indicates that
 *    there is a process ready to run.
 */
void
uvm_pageidlezero()
{
	struct vm_page *pg;
	struct pgfreelist *pgfl;
	int free_list, s;

	do {
		s = uvm_lock_fpageq();

		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq(s);
			return;
		}

		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			pgfl = &uvm.page_free[free_list];
			if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
			    PGFL_UNKNOWN])) != NULL)
				break;
		}

		if (pg == NULL) {
			/*
			 * No non-zero'd pages; don't bother trying again
			 * until we know we have non-zero'd pages free.
			 */
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq(s);
			return;
		}

		TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
		uvmexp.free--;
		uvm_unlock_fpageq(s);

#ifdef PMAP_PAGEIDLEZERO
		if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
			/*
			 * The machine-dependent code detected some
			 * reason for us to abort zeroing pages,
			 * probably because there is a process now
			 * ready to run.
			 */
			s = uvm_lock_fpageq();
			TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
			    pg, pageq);
			uvmexp.free++;
			uvmexp.zeroaborts++;
			uvm_unlock_fpageq(s);
			return;
		}
#else
		/*
		 * XXX This will toast the cache unless the pmap_zero_page()
		 * XXX implementation does uncached access.
		 */
		pmap_zero_page(pg);
#endif
		pg->flags |= PG_ZERO;

		s = uvm_lock_fpageq();
		TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
		uvmexp.free++;
		uvmexp.zeropages++;
		uvm_unlock_fpageq(s);
	} while (whichqs == 0);
}
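
/*
 * Invocation note: uvm.page_idle_zero is the hint checked by the
 * machine-dependent idle loop; while it is TRUE the idle code
 * typically calls uvm_pageidlezero() to move PGFL_UNKNOWN pages onto
 * the PGFL_ZEROS queue until a process becomes runnable (whichqs != 0)
 * or UVM_PAGEZERO_TARGET is reached.
 */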