uvm_page.c revision 1.37
/*	$OpenBSD: uvm_page.c,v 1.37 2001/12/04 23:22:42 art Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.66 2001/09/10 21:19:43 chris Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#define UVM_PAGE                /* pull in uvm_page.h functions */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];   /* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;                            /* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once the allocator is ready.
 * we statically allocate the one bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

/*
 * we allocate an initial number of page colors in uvm_page_init(),
 * and remember them.  We may re-color pages as cache sizes are
 * discovered during the autoconfiguration phase.  But we can never
 * free the initial set of buckets, since they are allocated using
 * uvm_pageboot_alloc().
 */

static boolean_t have_recolored_pages /* = FALSE */;

/*
 * local prototypes
 */

static void uvm_pageinsert __P((struct vm_page *));
static void uvm_pageremove __P((struct vm_page *));

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(pg)
        struct vm_page *pg;
{
        struct pglist *buck;
        int s;

        KASSERT((pg->flags & PG_TABLED) == 0);
        buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
        s = splvm();
        simple_lock(&uvm.hashlock);
        TAILQ_INSERT_TAIL(buck, pg, hashq);     /* put in hash */
        simple_unlock(&uvm.hashlock);
        splx(s);

        TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
        pg->flags |= PG_TABLED;
        pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(pg)
        struct vm_page *pg;
{
        struct pglist *buck;
        int s;

        KASSERT(pg->flags & PG_TABLED);
        buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
        s = splvm();
        simple_lock(&uvm.hashlock);
        TAILQ_REMOVE(buck, pg, hashq);
        simple_unlock(&uvm.hashlock);
        splx(s);

        if (UVM_OBJ_IS_VTEXT(pg->uobject)) {
                uvmexp.vtextpages--;
        } else if (UVM_OBJ_IS_VNODE(pg->uobject)) {
                uvmexp.vnodepages--;
        }

        /* object should be locked */
        TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

        pg->flags &= ~PG_TABLED;
        pg->uobject->uo_npages--;
        pg->uobject = NULL;
        pg->version++;
}
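
/*
 * uvm_page_init_buckets: init the per-color queues of one free list.
 * each free list has uvmexp.ncolors buckets, and each bucket has one
 * queue per queue type (PGFL_ZEROS, PGFL_UNKNOWN).
 */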
static void
uvm_page_init_buckets(struct pgfreelist *pgfl)
{
        int color, i;

        for (color = 0; color < uvmexp.ncolors; color++) {
                for (i = 0; i < PGFL_NQUEUES; i++) {
                        TAILQ_INIT(&pgfl->pgfl_buckets[
                            color].pgfl_queues[i]);
                }
        }
}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(kvm_startp, kvm_endp)
        vaddr_t *kvm_startp, *kvm_endp;
{
        vsize_t freepages, pagecount, bucketcount, n;
        struct pgflbucket *bucketarray;
        struct vm_page *pagearray;
        int lcv, i;
        paddr_t paddr;

        /*
         * init the page queues and page queue locks, except the free
         * list; we allocate that later (with the initial vm_page
         * structures).
         */

        TAILQ_INIT(&uvm.page_active);
        TAILQ_INIT(&uvm.page_inactive);
        simple_lock_init(&uvm.pageqlock);
        simple_lock_init(&uvm.fpageqlock);

        /*
         * init the <obj,offset> => <page> hash table.  for now
         * we just have one bucket (the bootstrap bucket).  later on we
         * will allocate new buckets as we dynamically resize the hash table.
         */

        uvm.page_nhash = 1;                     /* 1 bucket */
        uvm.page_hashmask = 0;                  /* mask for hash function */
        uvm.page_hash = &uvm_bootbucket;        /* install bootstrap bucket */
        TAILQ_INIT(uvm.page_hash);              /* init hash table */
        simple_lock_init(&uvm.hashlock);        /* init hash table lock */

        /*
         * allocate vm_page structures.
         */

        /*
         * sanity check:
         * before calling this function the MD code is expected to register
         * some free RAM with the uvm_page_physload() function.  our job
         * now is to allocate vm_page structures for this memory.
         */

        if (vm_nphysseg == 0)
                panic("uvm_page_init: no memory pre-allocated");

        /*
         * first calculate the number of free pages...
         *
         * note that we use start/end rather than avail_start/avail_end.
         * this allows us to allocate extra vm_page structures in case we
         * want to return some memory to the pool after booting.
         */

        freepages = 0;
        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
                freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

        /*
         * Let MD code initialize the number of colors, or default
         * to 1 color if MD code doesn't care.
         */
        if (uvmexp.ncolors == 0)
                uvmexp.ncolors = 1;
        uvmexp.colormask = uvmexp.ncolors - 1;

        /*
         * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
         * use.  for each page of memory we use we need a vm_page structure.
         * thus, the total number of pages we can use is the total size of
         * the memory divided by the PAGE_SIZE plus the size of the vm_page
         * structure.  we add one to freepages as a fudge factor to avoid
         * truncation errors (since we can only allocate in terms of whole
         * pages).
         */

        bucketcount = uvmexp.ncolors * VM_NFREELIST;
        pagecount = ((freepages + 1) << PAGE_SHIFT) /
            (PAGE_SIZE + sizeof(struct vm_page));

        bucketarray = (void *) uvm_pageboot_alloc((bucketcount *
            sizeof(struct pgflbucket)) + (pagecount *
            sizeof(struct vm_page)));
        pagearray = (struct vm_page *)(bucketarray + bucketcount);

        for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
                uvm.page_free[lcv].pgfl_buckets =
                    (bucketarray + (lcv * uvmexp.ncolors));
                uvm_page_init_buckets(&uvm.page_free[lcv]);
        }

        memset(pagearray, 0, pagecount * sizeof(struct vm_page));

        /*
         * init the vm_page structures and put them in the correct place.
         */

        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
                n = vm_physmem[lcv].end - vm_physmem[lcv].start;
                if (n > pagecount) {
                        printf("uvm_page_init: lost %ld page(s) in init\n",
                            (long)(n - pagecount));
                        panic("uvm_page_init");  /* XXXCDC: shouldn't happen? */
                        /* n = pagecount; */
                }

                /* set up page array pointers */
                vm_physmem[lcv].pgs = pagearray;
                pagearray += n;
                pagecount -= n;
                vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

                /* init and free vm_pages (we've already zeroed them) */
                paddr = ptoa(vm_physmem[lcv].start);
                for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
                        vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
                        VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
#endif
                        if (atop(paddr) >= vm_physmem[lcv].avail_start &&
                            atop(paddr) <= vm_physmem[lcv].avail_end) {
                                uvmexp.npages++;
                                /* add page to free pool */
                                uvm_pagefree(&vm_physmem[lcv].pgs[i]);
                        }
                }
        }

        /*
         * pass up the values of virtual_space_start and
         * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
         * layers of the VM.
         */

        *kvm_startp = round_page(virtual_space_start);
        *kvm_endp = trunc_page(virtual_space_end);

        /*
         * init locks for kernel threads
         */

        simple_lock_init(&uvm.pagedaemon_lock);
        simple_lock_init(&uvm.aiodoned_lock);

        /*
         * init reserve thresholds
         * XXXCDC - values may need adjusting
         */
        uvmexp.reserve_pagedaemon = 4;
        uvmexp.reserve_kernel = 6;
        uvmexp.anonminpct = 10;
        uvmexp.vnodeminpct = 10;
        uvmexp.vtextminpct = 5;
        uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
        uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
        uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

        /*
         * determine if we should zero pages in the idle loop.
         */

        uvm.page_idle_zero = vm_page_zero_enable;

        /*
         * done!
         */

        uvm.page_init_done = TRUE;
}
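
/*
 * note: machine-dependent code is expected to call uvm_setpagesize()
 * and then uvm_page_physload() for each chunk of physical RAM before
 * uvm_init() runs uvm_page_init() above; the checks in uvm_page_init()
 * and uvm_page_physload() enforce this ordering.
 */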

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize()
{
        if (uvmexp.pagesize == 0)
                uvmexp.pagesize = DEFAULT_PAGE_SIZE;
        uvmexp.pagemask = uvmexp.pagesize - 1;
        if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
                panic("uvm_setpagesize: page size not a power of two");
        /* find pageshift such that (1 << pageshift) == pagesize */
        for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
                if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
                        break;
}

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(size)
        vsize_t size;
{
        static boolean_t initialized = FALSE;
        vaddr_t addr;
#if !defined(PMAP_STEAL_MEMORY)
        vaddr_t vaddr;
        paddr_t paddr;
#endif

        /*
         * on first call to this function, initialize ourselves.
         */
        if (initialized == FALSE) {
                pmap_virtual_space(&virtual_space_start, &virtual_space_end);

                /* round it the way we like it */
                virtual_space_start = round_page(virtual_space_start);
                virtual_space_end = trunc_page(virtual_space_end);

                initialized = TRUE;
        }

        /* round to page size */
        size = round_page(size);

#if defined(PMAP_STEAL_MEMORY)

        /*
         * defer bootstrap allocation to MD code (it may want to allocate
         * from a direct-mapped segment).  pmap_steal_memory should adjust
         * virtual_space_start/virtual_space_end if necessary.
         */

        addr = pmap_steal_memory(size, &virtual_space_start,
            &virtual_space_end);

        return(addr);

#else /* !PMAP_STEAL_MEMORY */

        /*
         * allocate virtual memory for this request
         */
        if (virtual_space_start == virtual_space_end ||
            (virtual_space_end - virtual_space_start) < size)
                panic("uvm_pageboot_alloc: out of virtual space");

        addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
        /*
         * If the kernel pmap can't map the requested space,
         * then allocate more resources for it.
         */
        if (uvm_maxkaddr < (addr + size)) {
                uvm_maxkaddr = pmap_growkernel(addr + size);
                if (uvm_maxkaddr < (addr + size))
                        panic("uvm_pageboot_alloc: pmap_growkernel() failed");
        }
#endif

        virtual_space_start += size;

        /*
         * allocate and mapin physical pages to back new virtual pages
         */

        for (vaddr = round_page(addr) ; vaddr < addr + size ;
            vaddr += PAGE_SIZE) {

                if (!uvm_page_physget(&paddr))
                        panic("uvm_pageboot_alloc: out of memory");

                /*
                 * Note this memory is no longer managed, so using
                 * pmap_kenter is safe.
                 */
                pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
        }
        pmap_update(pmap_kernel());
        return(addr);
#endif  /* PMAP_STEAL_MEMORY */
}
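
/*
 * example (hypothetical names): early pmap bootstrap code might grab
 * space for a table with:
 *
 *      pv_table = (struct pv_entry *)
 *          uvm_pageboot_alloc(npages * sizeof(struct pv_entry));
 *
 * the result is page aligned and already mapped, but the caller
 * should not assume it is zeroed.
 */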

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
static boolean_t uvm_page_physget_freelist __P((paddr_t *, int));

static boolean_t
uvm_page_physget_freelist(paddrp, freelist)
        paddr_t *paddrp;
        int freelist;
{
        int lcv, x;

        /* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
    (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
        for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
        {

                if (uvm.page_init_done == TRUE)
                        panic("uvm_page_physget: called _after_ bootstrap");

                if (vm_physmem[lcv].free_list != freelist)
                        continue;

                /* try from front */
                if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
                    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
                        *paddrp = ptoa(vm_physmem[lcv].avail_start);
                        vm_physmem[lcv].avail_start++;
                        vm_physmem[lcv].start++;
                        /* nothing left?   nuke it */
                        if (vm_physmem[lcv].avail_start ==
                            vm_physmem[lcv].end) {
                                if (vm_nphysseg == 1)
                                        panic("uvm_page_physget: out of memory!");
                                vm_nphysseg--;
                                for (x = lcv ; x < vm_nphysseg ; x++)
                                        /* structure copy */
                                        vm_physmem[x] = vm_physmem[x+1];
                        }
                        return (TRUE);
                }

                /* try from rear */
                if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
                    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
                        *paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
                        vm_physmem[lcv].avail_end--;
                        vm_physmem[lcv].end--;
                        /* nothing left?   nuke it */
                        if (vm_physmem[lcv].avail_end ==
                            vm_physmem[lcv].start) {
                                if (vm_nphysseg == 1)
                                        panic("uvm_page_physget: out of memory!");
                                vm_nphysseg--;
                                for (x = lcv ; x < vm_nphysseg ; x++)
                                        /* structure copy */
                                        vm_physmem[x] = vm_physmem[x+1];
                        }
                        return (TRUE);
                }
        }

        /* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
    (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
        for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
        {

                /* any room in this bank? */
                if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
                        continue;  /* nope */

                *paddrp = ptoa(vm_physmem[lcv].avail_start);
                vm_physmem[lcv].avail_start++;
                /* truncate! */
                vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

                /* nothing left?   nuke it */
                if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
                        if (vm_nphysseg == 1)
                                panic("uvm_page_physget: out of memory!");
                        vm_nphysseg--;
                        for (x = lcv ; x < vm_nphysseg ; x++)
                                /* structure copy */
                                vm_physmem[x] = vm_physmem[x+1];
                }
                return (TRUE);
        }

        return (FALSE);        /* whoops! */
}
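
/*
 * note: both passes above shrink segments in place; when a segment is
 * exhausted it is deleted by copying the remaining vm_physmem[]
 * entries down over it.
 */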

boolean_t
uvm_page_physget(paddrp)
        paddr_t *paddrp;
{
        int i;

        /* try in the order of freelist preference */
        for (i = 0; i < VM_NFREELIST; i++)
                if (uvm_page_physget_freelist(paddrp, i) == TRUE)
                        return (TRUE);
        return (FALSE);
}
#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs (page frame numbers)
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(start, end, avail_start, avail_end, free_list)
        paddr_t start, end, avail_start, avail_end;
        int free_list;
{
        int preload, lcv;
        psize_t npages;
        struct vm_page *pgs;
        struct vm_physseg *ps;

        if (uvmexp.pagesize == 0)
                panic("uvm_page_physload: page size not set!");

        if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
                panic("uvm_page_physload: bad free list %d\n", free_list);

        if (start >= end)
                panic("uvm_page_physload: start >= end");

        /*
         * do we have room?
         */
        if (vm_nphysseg == VM_PHYSSEG_MAX) {
                printf("uvm_page_physload: unable to load physical memory "
                    "segment\n");
                printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
                    VM_PHYSSEG_MAX, (long long)start, (long long)end);
                printf("\tincrease VM_PHYSSEG_MAX\n");
                return;
        }

        /*
         * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
         * called yet, so malloc is not available).
         */
        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
                if (vm_physmem[lcv].pgs)
                        break;
        }
        preload = (lcv == vm_nphysseg);

        /*
         * if VM is already running, attempt to malloc() vm_page structures
         */
        if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
                panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#else
                /* XXXCDC: need some sort of lockout for this case */
                paddr_t paddr;
                npages = end - start;  /* # of pages */
                pgs = malloc(sizeof(struct vm_page) * npages,
                    M_VMPAGE, M_NOWAIT);
                if (pgs == NULL) {
                        printf("uvm_page_physload: can not malloc vm_page "
                            "structs for segment\n");
                        printf("\tignoring 0x%llx -> 0x%llx\n",
                            (long long)start, (long long)end);
                        return;
                }
                /* zero data, init phys_addr and free_list, and free pages */
                memset(pgs, 0, sizeof(struct vm_page) * npages);
                for (lcv = 0, paddr = ptoa(start) ;
                    lcv < npages ; lcv++, paddr += PAGE_SIZE) {
                        pgs[lcv].phys_addr = paddr;
                        pgs[lcv].free_list = free_list;
                        if (atop(paddr) >= avail_start &&
                            atop(paddr) <= avail_end)
                                uvm_pagefree(&pgs[lcv]);
                }
                /* XXXCDC: incomplete: need to update uvmexp.free, what else? */
                /* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
        } else {

                /* gcc complains if these don't get init'd */
                pgs = NULL;
                npages = 0;

        }
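
        /*
         * note: where the new entry lands depends on VM_PHYSSEG_STRAT:
         * RANDOM appends, BSEARCH keeps vm_physmem[] sorted by address,
         * and BIGFIRST keeps it sorted with the largest segment first.
         */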

        /*
         * now insert us in the proper place in vm_physmem[]
         */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

        /* random: put it at the end (easy!) */
        ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

        {
                int x;
                /* sort by address for binary search */
                for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
                        if (start < vm_physmem[lcv].start)
                                break;
                ps = &vm_physmem[lcv];
                /* move back other entries, if necessary ... */
                for (x = vm_nphysseg ; x > lcv ; x--)
                        /* structure copy */
                        vm_physmem[x] = vm_physmem[x - 1];
        }

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

        {
                int x;
                /* sort by largest segment first */
                for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
                        if ((end - start) >
                            (vm_physmem[lcv].end - vm_physmem[lcv].start))
                                break;
                ps = &vm_physmem[lcv];
                /* move back other entries, if necessary ... */
                for (x = vm_nphysseg ; x > lcv ; x--)
                        /* structure copy */
                        vm_physmem[x] = vm_physmem[x - 1];
        }

#else

        panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

        ps->start = start;
        ps->end = end;
        ps->avail_start = avail_start;
        ps->avail_end = avail_end;
        if (preload) {
                ps->pgs = NULL;
        } else {
                ps->pgs = pgs;
                ps->lastpg = pgs + npages - 1;
        }
        ps->free_list = free_list;
        vm_nphysseg++;

        /*
         * done!
         */

        if (!preload)
                uvm_page_rehash();

        return;
}

/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */

void
uvm_page_rehash()
{
        int freepages, lcv, bucketcount, s, oldcount;
        struct pglist *newbuckets, *oldbuckets;
        struct vm_page *pg;
        size_t newsize, oldsize;

        /*
         * compute number of pages that can go in the free pool
         */

        freepages = 0;
        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
                freepages +=
                    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

        /*
         * compute number of buckets needed for this number of pages
         * (round up to a power of two so page_hashmask can be used as
         * a mask)
         */

        bucketcount = 1;
        while (bucketcount < freepages)
                bucketcount = bucketcount * 2;

        /*
         * compute the size of the current table and new table.
         */

        oldbuckets = uvm.page_hash;
        oldcount = uvm.page_nhash;
        oldsize = round_page(sizeof(struct pglist) * oldcount);
        newsize = round_page(sizeof(struct pglist) * bucketcount);

        /*
         * allocate the new buckets
         */

        newbuckets = (struct pglist *) uvm_km_alloc(kernel_map, newsize);
        if (newbuckets == NULL) {
                printf("uvm_page_rehash: WARNING: could not grow page "
                    "hash table\n");
                return;
        }
        for (lcv = 0 ; lcv < bucketcount ; lcv++)
                TAILQ_INIT(&newbuckets[lcv]);

        /*
         * now replace the old buckets with the new ones and rehash everything
         */

        s = splvm();
        simple_lock(&uvm.hashlock);
        uvm.page_hash = newbuckets;
        uvm.page_nhash = bucketcount;
        uvm.page_hashmask = bucketcount - 1;    /* power of 2 */

        /* ... and rehash */
        for (lcv = 0 ; lcv < oldcount ; lcv++) {
                while ((pg = oldbuckets[lcv].tqh_first) != NULL) {
                        TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
                        TAILQ_INSERT_TAIL(
                            &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
                            pg, hashq);
                }
        }
        simple_unlock(&uvm.hashlock);
        splx(s);

        /*
         * free old bucket array if it is not the boot-time table
         */

        if (oldbuckets != &uvm_bootbucket)
                uvm_km_free(kernel_map, (vaddr_t) oldbuckets, oldsize);

        /*
         * done
         */
        return;
}
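
/*
 * uvm_page_rehash() is invoked from uvm_page_physload() whenever RAM is
 * added after boot, keeping the average hash chain length at about one
 * page per bucket.
 */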

/*
 * uvm_page_recolor: Recolor the pages if the new bucket count is
 * larger than the old one.
 */

void
uvm_page_recolor(int newncolors)
{
        struct pgflbucket *bucketarray, *oldbucketarray;
        struct pgfreelist pgfl;
        struct vm_page *pg;
        vsize_t bucketcount;
        int s, lcv, color, i, ocolors;

        if (newncolors <= uvmexp.ncolors)
                return;

        bucketcount = newncolors * VM_NFREELIST;
        bucketarray = malloc(bucketcount * sizeof(struct pgflbucket),
            M_VMPAGE, M_NOWAIT);
        if (bucketarray == NULL) {
                printf("WARNING: unable to allocate %ld page color buckets\n",
                    (long) bucketcount);
                return;
        }

        s = uvm_lock_fpageq();

        /* Make sure we should still do this. */
        if (newncolors <= uvmexp.ncolors) {
                uvm_unlock_fpageq(s);
                free(bucketarray, M_VMPAGE);
                return;
        }

        oldbucketarray = uvm.page_free[0].pgfl_buckets;
        ocolors = uvmexp.ncolors;

        uvmexp.ncolors = newncolors;
        uvmexp.colormask = uvmexp.ncolors - 1;

        for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
                pgfl.pgfl_buckets = (bucketarray + (lcv * newncolors));
                uvm_page_init_buckets(&pgfl);
                for (color = 0; color < ocolors; color++) {
                        for (i = 0; i < PGFL_NQUEUES; i++) {
                                while ((pg = TAILQ_FIRST(&uvm.page_free[
                                    lcv].pgfl_buckets[color].pgfl_queues[i]))
                                    != NULL) {
                                        TAILQ_REMOVE(&uvm.page_free[
                                            lcv].pgfl_buckets[
                                            color].pgfl_queues[i], pg, pageq);
                                        TAILQ_INSERT_TAIL(&pgfl.pgfl_buckets[
                                            VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
                                            i], pg, pageq);
                                }
                        }
                }
                uvm.page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
        }

        if (have_recolored_pages) {
                uvm_unlock_fpageq(s);
                free(oldbucketarray, M_VMPAGE);
                return;
        }

        have_recolored_pages = TRUE;
        uvm_unlock_fpageq(s);
}

#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump __P((void)); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump()
{
        int lcv;

        printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
            vm_nphysseg, VM_PHYSSEG_MAX);
        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
                printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
                    (long long)vm_physmem[lcv].start,
                    (long long)vm_physmem[lcv].end,
                    (long long)vm_physmem[lcv].avail_start,
                    (long long)vm_physmem[lcv].avail_end);
        printf("STRATEGY = ");
        switch (VM_PHYSSEG_STRAT) {
        case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
        case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
        case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
        default: printf("<<UNKNOWN>>!!!!\n");
        }
        printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

/*
 * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat
 */

static __inline struct vm_page *
uvm_pagealloc_pgfl(struct pgfreelist *pgfl, int try1, int try2,
    unsigned int *trycolorp)
{
        struct pglist *freeq;
        struct vm_page *pg;
        int color, trycolor = *trycolorp;

        color = trycolor;
        do {
                if ((pg = TAILQ_FIRST((freeq =
                    &pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL)
                        goto gotit;
                if ((pg = TAILQ_FIRST((freeq =
                    &pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL)
                        goto gotit;
                color = (color + 1) & uvmexp.colormask;
        } while (color != trycolor);

        return (NULL);

 gotit:
        TAILQ_REMOVE(freeq, pg, pageq);
        uvmexp.free--;

        /* update zero'd page count */
        if (pg->flags & PG_ZERO)
                uvmexp.zeropages--;

        if (color == trycolor)
                uvmexp.colorhit++;
        else {
                uvmexp.colormiss++;
                *trycolorp = color;
        }

        return (pg);
}
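
/*
 * (try1/try2 above select which of the two queues, PGFL_ZEROS or
 * PGFL_UNKNOWN, is searched first; the color buckets are walked
 * round-robin starting at *trycolorp until a page is found.)
 */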

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 * => policy decision: it is more important to pull a page off of the
 *    appropriate priority free list than it is to get a zero'd or
 *    unknown contents page.  This is because we live with the
 *    consequences of a bad free list decision for the entire
 *    lifetime of the page, e.g. if the page comes from memory that
 *    is slower to access.
 */

struct vm_page *
uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
        struct uvm_object *obj;
        voff_t off;
        int flags;
        struct vm_anon *anon;
        int strat, free_list;
{
        int lcv, try1, try2, s, zeroit = 0, color;
        struct vm_page *pg;
        boolean_t use_reserve;

        KASSERT(obj == NULL || anon == NULL);
        KASSERT(off == trunc_page(off));

        LOCK_ASSERT(obj == NULL || simple_lock_held(&obj->vmobjlock));
        LOCK_ASSERT(anon == NULL || simple_lock_held(&anon->an_lock));

        s = uvm_lock_fpageq();

        /*
         * This implements a global round-robin page coloring
         * algorithm.
         *
         * XXXJRT: Should we make the `nextcolor' per-cpu?
         * XXXJRT: What about virtually-indexed caches?
         */
        color = uvm.page_free_nextcolor;

        /*
         * check to see if we need to generate some free pages by waking
         * the pagedaemon.
         */

        UVM_KICK_PDAEMON();

        /*
         * fail if any of these conditions is true:
         * [1]  there really are no free pages, or
         * [2]  only kernel "reserved" pages remain and
         *        the page isn't being allocated to a kernel object.
         * [3]  only pagedaemon "reserved" pages remain and
         *        the requestor isn't the pagedaemon.
         */

        use_reserve = (flags & UVM_PGA_USERESERVE) ||
            (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
        if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
            (uvmexp.free <= uvmexp.reserve_pagedaemon &&
             !(use_reserve && (curproc == uvm.pagedaemon_proc ||
               curproc == syncerproc))))
                goto fail;

#if PGFL_NQUEUES != 2
#error uvm_pagealloc_strat needs to be updated
#endif

        /*
         * If we want a zero'd page, try the ZEROS queue first, otherwise
         * we try the UNKNOWN queue first.
         */
        if (flags & UVM_PGA_ZERO) {
                try1 = PGFL_ZEROS;
                try2 = PGFL_UNKNOWN;
        } else {
                try1 = PGFL_UNKNOWN;
                try2 = PGFL_ZEROS;
        }
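
        /*
         * strategy selection: UVM_PGA_STRAT_NORMAL scans all free lists
         * in priority order; UVM_PGA_STRAT_ONLY looks only at the named
         * free list; UVM_PGA_STRAT_FALLBACK tries the named free list
         * first and then falls back to the normal scan.
         */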

 again:
        switch (strat) {
        case UVM_PGA_STRAT_NORMAL:
                /* Check all freelists in descending priority order. */
                for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
                        pg = uvm_pagealloc_pgfl(&uvm.page_free[lcv],
                            try1, try2, &color);
                        if (pg != NULL)
                                goto gotit;
                }

                /* No pages free! */
                goto fail;

        case UVM_PGA_STRAT_ONLY:
        case UVM_PGA_STRAT_FALLBACK:
                /* Attempt to allocate from the specified free list. */
                KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
                pg = uvm_pagealloc_pgfl(&uvm.page_free[free_list],
                    try1, try2, &color);
                if (pg != NULL)
                        goto gotit;

                /* Fall back, if possible. */
                if (strat == UVM_PGA_STRAT_FALLBACK) {
                        strat = UVM_PGA_STRAT_NORMAL;
                        goto again;
                }

                /* No pages free! */
                goto fail;

        default:
                panic("uvm_pagealloc_strat: bad strat %d", strat);
                /* NOTREACHED */
        }

 gotit:
        /*
         * We now know which color we actually allocated from; set
         * the next color accordingly.
         */
        uvm.page_free_nextcolor = (color + 1) & uvmexp.colormask;

        /*
         * update allocation statistics and remember if we have to
         * zero the page
         */
        if (flags & UVM_PGA_ZERO) {
                if (pg->flags & PG_ZERO) {
                        uvmexp.pga_zerohit++;
                        zeroit = 0;
                } else {
                        uvmexp.pga_zeromiss++;
                        zeroit = 1;
                }
        }

        uvm_unlock_fpageq(s);           /* unlock free page queue */

        pg->offset = off;
        pg->uobject = obj;
        pg->uanon = anon;
        pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
        pg->version++;
        if (anon) {
                anon->u.an_page = pg;
                pg->pqflags = PQ_ANON;
                uvmexp.anonpages++;
        } else {
                if (obj)
                        uvm_pageinsert(pg);
                pg->pqflags = 0;
        }
#if defined(UVM_PAGE_TRKOWN)
        pg->owner_tag = NULL;
#endif
        UVM_PAGE_OWN(pg, "new alloc");

        if (flags & UVM_PGA_ZERO) {
                /*
                 * A zero'd page is not clean.  If we got a page not already
                 * zero'd, then we have to zero it ourselves.
                 */
                pg->flags &= ~PG_CLEAN;
                if (zeroit)
                        pmap_zero_page(VM_PAGE_TO_PHYS(pg));
        }

        return(pg);

 fail:
        uvm_unlock_fpageq(s);
        return (NULL);
}

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(pg, newobj, newoff)
        struct vm_page *pg;
        struct uvm_object *newobj;
        voff_t newoff;
{
        /*
         * remove it from the old object
         */

        if (pg->uobject) {
                uvm_pageremove(pg);
        }

        /*
         * put it in the new object
         */

        if (newobj) {
                pg->uobject = newobj;
                pg->offset = newoff;
                pg->version++;
                uvm_pageinsert(pg);
        }
}
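
/*
 * (note: the pg->version bump here and in uvm_pageremove() appears
 * intended to let code that re-checks a page after relocking detect
 * that its <object,offset> identity has changed.)
 */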

/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(pg)
        struct vm_page *pg;
{
        int s;
        int saved_loan_count = pg->loan_count;

#ifdef DEBUG
        if (pg->uobject == (void *)0xdeadbeef &&
            pg->uanon == (void *)0xdeadbeef) {
                panic("uvm_pagefree: freeing free page %p\n", pg);
        }
#endif

        /*
         * if the page was an object page (and thus "TABLED"), remove it
         * from the object.
         */

        if (pg->flags & PG_TABLED) {

                /*
                 * if the object page is on loan we are going to drop ownership.
                 * it is possible that an anon will take over as owner for this
                 * page later on.  the anon will want a !PG_CLEAN page so that
                 * it knows it needs to allocate swap if it wants to page the
                 * page out.
                 */

                if (saved_loan_count)
                        pg->flags &= ~PG_CLEAN; /* in case an anon takes over */
                uvm_pageremove(pg);

                /*
                 * if our page was on loan, then we just lost control over it
                 * (in fact, if it was loaned to an anon, the anon may have
                 * already taken over ownership of the page by now and thus
                 * changed the loan_count [e.g. in uvmfault_anonget()]); we
                 * just return (when the last loan is dropped, the page can
                 * be freed by whatever is holding the last loan).
                 */

                if (saved_loan_count)
                        return;
        } else if (saved_loan_count && (pg->pqflags & PQ_ANON)) {

                /*
                 * if our page is owned by an anon and is loaned out to the
                 * kernel then we just want to drop ownership and return.
                 * the kernel must free the page when all its loans clear ...
                 * note that the kernel can't change the loan status of our
                 * page as long as we are holding PQ lock.
                 */

                pg->pqflags &= ~PQ_ANON;
                pg->uanon = NULL;
                return;
        }
        KASSERT(saved_loan_count == 0);

        /*
         * now remove the page from the queues
         */

        if (pg->pqflags & PQ_ACTIVE) {
                TAILQ_REMOVE(&uvm.page_active, pg, pageq);
                pg->pqflags &= ~PQ_ACTIVE;
                uvmexp.active--;
        } else if (pg->pqflags & PQ_INACTIVE) {
                TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
                pg->pqflags &= ~PQ_INACTIVE;
                uvmexp.inactive--;
        }

        /*
         * if the page was wired, unwire it now.
         */

        if (pg->wire_count) {
                pg->wire_count = 0;
                uvmexp.wired--;
        }

        if (pg->uanon) {
                uvmexp.anonpages--;
        }

        /*
         * and put on free queue
         */

        pg->flags &= ~PG_ZERO;

        s = uvm_lock_fpageq();
        TAILQ_INSERT_TAIL(&uvm.page_free[
            uvm_page_lookup_freelist(pg)].pgfl_buckets[
            VM_PGCOLOR_BUCKET(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
        pg->pqflags = PQ_FREE;
#ifdef DEBUG
        pg->uobject = (void *)0xdeadbeef;
        pg->offset = 0xdeadbeef;
        pg->uanon = (void *)0xdeadbeef;
#endif
        uvmexp.free++;

        if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
                uvm.page_idle_zero = vm_page_zero_enable;

        uvm_unlock_fpageq(s);
}
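
/*
 * note that uvm_pagefree() always clears PG_ZERO and frees onto the
 * PGFL_UNKNOWN queue, since the page's old contents are stale; pages
 * only reach the PGFL_ZEROS queue via uvm_pageidlezero() below.
 */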

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
 */

void
uvm_page_unbusy(pgs, npgs)
        struct vm_page **pgs;
        int npgs;
{
        struct vm_page *pg;
        struct uvm_object *uobj;
        int i;
        UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);

        for (i = 0; i < npgs; i++) {
                pg = pgs[i];

                if (pg == NULL) {
                        continue;
                }
                if (pg->flags & PG_WANTED) {
                        wakeup(pg);
                }
                if (pg->flags & PG_RELEASED) {
                        UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0);
                        uobj = pg->uobject;
                        if (uobj != NULL) {
                                uobj->pgops->pgo_releasepg(pg, NULL);
                        } else {
                                pg->flags &= ~(PG_BUSY);
                                UVM_PAGE_OWN(pg, NULL);
                                uvm_anfree(pg->uanon);
                        }
                } else {
                        UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0);
                        KASSERT(pg->wire_count ||
                            (pg->pqflags & (PQ_ACTIVE|PQ_INACTIVE)));
                        pg->flags &= ~(PG_WANTED|PG_BUSY);
                        UVM_PAGE_OWN(pg, NULL);
                }
        }
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *    and where they do it.  it can be used to track down problems
 *    such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(pg, tag)
        struct vm_page *pg;
        char *tag;
{
        /* gain ownership? */
        if (tag) {
                if (pg->owner_tag) {
                        printf("uvm_page_own: page %p already owned "
                            "by proc %d [%s]\n", pg,
                            pg->owner, pg->owner_tag);
                        panic("uvm_page_own");
                }
                pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
                pg->owner_tag = tag;
                return;
        }

        /* drop ownership */
        if (pg->owner_tag == NULL) {
                printf("uvm_page_own: dropping ownership of a non-owned "
                    "page (%p)\n", pg);
                panic("uvm_page_own");
        }
        pg->owner_tag = NULL;
        return;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => try to complete one color bucket at a time, to reduce our impact
 *    on the CPU cache.
 * => we loop until we either reach the target or whichqs indicates that
 *    there is a process ready to run.
 */
void
uvm_pageidlezero()
{
        struct vm_page *pg;
        struct pgfreelist *pgfl;
        int free_list, s, firstbucket;
        static int nextbucket;

        s = uvm_lock_fpageq();

        firstbucket = nextbucket;
        do {
                if (whichqs != 0) {
                        uvm_unlock_fpageq(s);
                        return;
                }

                if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
                        uvm.page_idle_zero = FALSE;
                        uvm_unlock_fpageq(s);
                        return;
                }

                for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
                        pgfl = &uvm.page_free[free_list];
                        while ((pg = TAILQ_FIRST(&pgfl->pgfl_buckets[
                            nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) {
                                if (whichqs != 0) {
                                        uvm_unlock_fpageq(s);
                                        return;
                                }

                                TAILQ_REMOVE(&pgfl->pgfl_buckets[
                                    nextbucket].pgfl_queues[PGFL_UNKNOWN],
                                    pg, pageq);
                                uvmexp.free--;
                                uvm_unlock_fpageq(s);
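                                /*
                                 * we drop the free page queue lock while
                                 * zeroing so we don't sit at splvm() for
                                 * the duration of the (slow) zero; the
                                 * page is safe to touch because it is no
                                 * longer on any free queue.
                                 */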
#ifdef PMAP_PAGEIDLEZERO
                                if (PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg)) ==
                                    FALSE) {
                                        /*
                                         * The machine-dependent code detected
                                         * some reason for us to abort zeroing
                                         * pages, probably because there is a
                                         * process now ready to run.
                                         */
                                        s = uvm_lock_fpageq();
                                        TAILQ_INSERT_HEAD(&pgfl->pgfl_buckets[
                                            nextbucket].pgfl_queues[
                                            PGFL_UNKNOWN], pg, pageq);
                                        uvmexp.free++;
                                        uvmexp.zeroaborts++;
                                        uvm_unlock_fpageq(s);
                                        return;
                                }
#else
                                pmap_zero_page(VM_PAGE_TO_PHYS(pg));
#endif /* PMAP_PAGEIDLEZERO */
                                pg->flags |= PG_ZERO;

                                s = uvm_lock_fpageq();
                                TAILQ_INSERT_HEAD(&pgfl->pgfl_buckets[
                                    nextbucket].pgfl_queues[PGFL_ZEROS],
                                    pg, pageq);
                                uvmexp.free++;
                                uvmexp.zeropages++;
                        }
                }

                nextbucket = (nextbucket + 1) & uvmexp.colormask;
        } while (nextbucket != firstbucket);

        uvm_unlock_fpageq(s);
}