vm_page.c revision 45347
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $Id: vm_page.c,v 1.128 1999/03/19 05:21:03 alc Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

static void vm_page_queue_init __P((void));
static vm_page_t vm_page_select_cache __P((vm_object_t, vm_pindex_t));

/*
 * Associated with each page of user-allocatable memory is a
 * page structure.
 */

static struct vm_page **vm_page_buckets; /* Array of buckets */
static int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */
static volatile int vm_page_bucket_generation;

struct pglist vm_page_queue_free[PQ_L2_SIZE] = {{0}};
struct pglist vm_page_queue_active = {0};
struct pglist vm_page_queue_inactive = {0};
struct pglist vm_page_queue_cache[PQ_L2_SIZE] = {{0}};

static int no_queue = 0;

struct vpgqueues vm_page_queues[PQ_COUNT] = {{0}};
static int pqcnt[PQ_COUNT] = {0};

static void
vm_page_queue_init(void)
{
	int i;

	vm_page_queues[PQ_NONE].pl = NULL;
	vm_page_queues[PQ_NONE].cnt = &no_queue;
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_FREE + i].pl = &vm_page_queue_free[i];
		vm_page_queues[PQ_FREE + i].cnt = &cnt.v_free_count;
	}
	vm_page_queues[PQ_INACTIVE].pl = &vm_page_queue_inactive;
	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;

	vm_page_queues[PQ_ACTIVE].pl = &vm_page_queue_active;
	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_CACHE + i].pl = &vm_page_queue_cache[i];
		vm_page_queues[PQ_CACHE + i].cnt = &cnt.v_cache_count;
	}
	for (i = 0; i < PQ_COUNT; i++) {
		if (vm_page_queues[i].pl) {
			TAILQ_INIT(vm_page_queues[i].pl);
		} else if (i != 0) {
			panic("vm_page_queue_init: queue %d is null", i);
		}
		vm_page_queues[i].lcnt = &pqcnt[i];
	}
}

vm_page_t vm_page_array = 0;
static int vm_page_array_size = 0;
long first_page = 0;
static long last_page;
static vm_size_t page_mask;
static int page_shift;
int vm_page_zero_count = 0;

static __inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex));
static void vm_page_free_wakeup __P((void));

/*
 * vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void
vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0;; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}
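
/*
 * Editorial example (not part of the original source): with the usual
 * 4K page size, cnt.v_page_size = 4096, so page_mask becomes 0xfff and
 * the loop above terminates with page_shift = 12 (1 << 12 == 4096).
 * A non-power-of-two size such as 4095 fails the
 * (page_mask & cnt.v_page_size) check and panics.
 */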

/*
 * vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t starta;
	vm_offset_t enda;
	register vm_offset_t vaddr;
{
	register vm_offset_t mapped;
	register vm_page_t m;
	register struct vm_page **bucket;
	vm_size_t npages, page_range;
	register vm_offset_t new_start;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t first_managed_page;

	/* the biggest memory array is the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */

	vm_page_queue_init();

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * We make the hash table approximately 2x the number of pages to
	 * reduce the chain length.  Using the singly-linked list, this is
	 * about the same size as the 1x hash table we were using before
	 * switching to TAILQ, but the chain length will be smaller.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct vm_page **)vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_bucket_count <<= 1;
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct vm_page *);
	new_start = round_page(new_start);
	mapped = round_page(vaddr);
	vaddr = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;
	vaddr = round_page(vaddr);
	bzero((caddr_t) mapped, vaddr - mapped);

	for (i = 0; i < vm_page_bucket_count; i++) {
		*bucket = NULL;
		bucket++;
	}

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */

	first_page = phys_avail[0] / PAGE_SIZE;
	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;

	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (start - phys_avail[biggestone])) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */
	new_start = round_page(start + page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	/*
	 * Construct the free queue(s) in descending order (by physical
	 * address) so that the first 16MB of physical memory is allocated
	 * last rather than first.  On large-memory machines, this avoids
	 * the exhaustion of low physical memory before isa_dmainit has run.
	 */
	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->phys_addr = pa;
			m->flags = 0;
			m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
			m->queue = m->pc + PQ_FREE;
			TAILQ_INSERT_HEAD(vm_page_queues[m->queue].pl, m, pageq);
			++(*vm_page_queues[m->queue].lcnt);
			pa += PAGE_SIZE;
		}
	}
	return (mapped);
}

/*
 * vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This routine depends on vm_page_bucket_count being a power of 2.
 *	This routine may not block.
 *
 *	We try to randomize the hash based on the object to spread the pages
 *	out in the hash table without it costing us too much.
 */
static __inline int
vm_page_hash(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	int i = ((uintptr_t)object + pindex) ^ object->hash_rand;

	return (i & vm_page_hash_mask);
}

/*
 * vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The pagetables are not updated but will presumably fault the page
 *	in if necessary, or if a kernel page the caller will at some point
 *	enter the page into the kernel's pmap.  We are not allowed to block
 *	here so we *can't* do this anyway.
 *
 *	The object and page must be locked, and must be splhigh.
 *	This routine may not block.
 */

void
vm_page_insert(m, object, pindex)
	register vm_page_t m;
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register struct vm_page **bucket;

	if (m->object != NULL)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */

	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	m->hnext = *bucket;
	*bucket = m;
	vm_page_bucket_generation++;

	/*
	 * Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, m, listq);
	m->object->generation++;

	if (m->wire_count)
		object->wire_count++;

	if ((m->queue - m->pc) == PQ_CACHE)
		object->cache_count++;

	/*
	 * show that the object has one more resident page.
	 */

	object->resident_page_count++;

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
	 */
	if (m->flags & PG_WRITEABLE)
		vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
}

/*
 * vm_page_remove:
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list, but does not invalidate/terminate
 *	the backing store.
 *
 *	The object and page must be locked, and at splhigh.
 *	The underlying pmap entry (if any) is NOT removed here.
 *	This routine may not block.
 */

void
vm_page_remove(m)
	vm_page_t m;
{
	vm_object_t object;

	if (m->object == NULL)
		return;

#if !defined(MAX_PERF)
	if ((m->flags & PG_BUSY) == 0) {
		panic("vm_page_remove: page not busy");
	}
#endif

	/*
	 * Basically destroy the page.
	 */

	vm_page_wakeup(m);

	object = m->object;

	if (m->wire_count)
		object->wire_count--;

	if ((m->queue - m->pc) == PQ_CACHE)
		object->cache_count--;

	/*
	 * Remove from the object_object/offset hash table.  The object
	 * must be on the hash queue; we will panic if it isn't.
	 *
	 * Note: we must NULL-out m->hnext to prevent loops in detached
	 * buffers with vm_page_lookup().
	 */

	{
		struct vm_page **bucket;

		bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
		while (*bucket != m) {
#if !defined(MAX_PERF)
			if (*bucket == NULL)
				panic("vm_page_remove(): page not found in hash");
#endif
			bucket = &(*bucket)->hnext;
		}
		*bucket = m->hnext;
		m->hnext = NULL;
		vm_page_bucket_generation++;
	}

	/*
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	object->resident_page_count--;
	object->generation++;

	m->object = NULL;
}

/*
 * vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	NOTE: the code below does not lock.  It will operate properly if
 *	an interrupt makes a change, but the generation algorithm will not
 *	operate properly in an SMP environment where both cpus are able to
 *	run kernel code simultaneously.
 *
 *	The object must be locked.  No side effects.
 *	This routine may not block.
 *	This is a critical path routine.
 */

vm_page_t
vm_page_lookup(object, pindex)
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register vm_page_t m;
	register struct vm_page **bucket;
	int generation;

	/*
	 * Search the hash table for this object/offset pair
	 */

retry:
	generation = vm_page_bucket_generation;
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	for (m = *bucket; m != NULL; m = m->hnext) {
		if ((m->object == object) && (m->pindex == pindex)) {
			if (vm_page_bucket_generation != generation)
				goto retry;
			return (m);
		}
	}
	if (vm_page_bucket_generation != generation)
		goto retry;
	return (NULL);
}
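
/*
 * Editorial note (not part of the original source): the generation
 * check in vm_page_lookup() guards the unlocked hash walk.  If an
 * interrupt-level vm_page_insert() or vm_page_remove() bumps
 * vm_page_bucket_generation while we are chasing hnext pointers, the
 * walk restarts from the bucket head instead of trusting a chain that
 * may have been relinked underneath us.
 */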

/*
 * vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 *	This routine may not block.
 *
 *	Note: this routine will raise itself to splvm(); the caller need not.
 *
 *	Note: swap associated with the page must be invalidated by the move.
 *	      We have to do this for several reasons:  (1) we aren't freeing
 *	      the page, (2) we are dirtying the page, (3) the VM system is
 *	      probably moving the page from object A to B, and will then later
 *	      move the backing store from A to B, and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.  If the page is on the cache, we have to deactivate it
 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	      on the cache.
 */

void
vm_page_rename(m, new_object, new_pindex)
	register vm_page_t m;
	register vm_object_t new_object;
	vm_pindex_t new_pindex;
{
	int s;

	s = splvm();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	if (m->queue - m->pc == PQ_CACHE)
		vm_page_deactivate(m);
	vm_page_dirty(m);
	splx(s);
}

/*
 * vm_page_unqueue_nowakeup:
 *
 *	vm_page_unqueue() without any wakeup
 *
 *	This routine must be called at splhigh().
 *	This routine may not block.
 */

void
vm_page_unqueue_nowakeup(m)
	vm_page_t m;
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(pq->pl, m, pageq);
		(*pq->cnt)--;
		(*pq->lcnt)--;
		if ((queue - m->pc) == PQ_CACHE) {
			if (m->object)
				m->object->cache_count--;
		}
	}
}

/*
 * vm_page_unqueue:
 *
 *	Remove a page from its queue.
 *
 *	This routine must be called at splhigh().
 *	This routine may not block.
 */

void
vm_page_unqueue(m)
	vm_page_t m;
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(pq->pl, m, pageq);
		(*pq->cnt)--;
		(*pq->lcnt)--;
		if ((queue - m->pc) == PQ_CACHE) {
			if ((cnt.v_cache_count + cnt.v_free_count) <
			    (cnt.v_free_reserved + cnt.v_cache_min))
				pagedaemon_wakeup();
			if (m->object)
				m->object->cache_count--;
		}
	}
}

#if PQ_L2_SIZE > 1

/*
 * vm_page_list_find:
 *
 *	Find a page on the specified queue with color optimization.
 *
 *	The page coloring optimization attempts to locate a page
 *	that does not overload other nearby pages in the object in
 *	the cpu's L1 or L2 caches.  We need this optimization because
 *	cpu caches tend to be physical caches, while object spaces tend
 *	to be virtual.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 *
 *	This routine may only be called from the vm_page_list_find() macro
 *	in vm_page.h
 */
vm_page_t
_vm_page_list_find(basequeue, index)
	int basequeue, index;
{
	int i;
	vm_page_t m = NULL;
	struct vpgqueues *pq;

	pq = &vm_page_queues[basequeue];

	/*
	 * Note that for the first loop, index+i and index-i wind up at the
	 * same place.  Even though this is not totally optimal, we've already
	 * blown it by missing the cache case so we do not care.
	 */

	for (i = PQ_L2_SIZE / 2; i > 0; --i) {
		if ((m = TAILQ_FIRST(pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
			break;

		if ((m = TAILQ_FIRST(pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
			break;
	}
	return (m);
}

#endif
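
/*
 * Editorial note (not part of the original source): callers first try
 * the preferred color, (pindex + object->pg_color) & PQ_L2_MASK, via
 * the vm_page_list_find() macro; only on a miss does
 * _vm_page_list_find() scan the remaining colors, starting with the
 * diametrically opposite one (for i = PQ_L2_SIZE/2, index+i and
 * index-i name the same queue) and working back toward the preferred
 * color.
 */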

/*
 * vm_page_select_cache:
 *
 *	Find a page on the cache queue with color optimization.  As pages
 *	might be found, but not applicable, they are deactivated.  This
 *	keeps us from using potentially busy cached pages.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 */
vm_page_t
vm_page_select_cache(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	vm_page_t m;

	while (TRUE) {
		m = vm_page_list_find(
		    PQ_CACHE,
		    (pindex + object->pg_color) & PQ_L2_MASK,
		    FALSE
		);
		if (m && ((m->flags & PG_BUSY) || m->busy ||
		    m->hold_count || m->wire_count)) {
			vm_page_deactivate(m);
			continue;
		}
		return (m);
	}
}

/*
 * vm_page_select_free:
 *
 *	Find a free or zero page, with specified preference.  We attempt to
 *	inline the nominal case and fall back to _vm_page_select_free()
 *	otherwise.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 */

static __inline vm_page_t
vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
{
	vm_page_t m;

	m = vm_page_list_find(
	    PQ_FREE,
	    (pindex + object->pg_color) & PQ_L2_MASK,
	    prefer_zero
	);
	return (m);
}

/*
 * vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	VM_ALLOC_ZERO		zero page
 *
 *	Object must be locked.
 *	This routine may not block.
 *
 *	Additional special handling is required when called from an
 *	interrupt (VM_ALLOC_INTERRUPT).  We are not allowed to mess with
 *	the page cache in this case.
 */

vm_page_t
vm_page_alloc(object, pindex, page_req)
	vm_object_t object;
	vm_pindex_t pindex;
	int page_req;
{
	register vm_page_t m = NULL;
	int s;

	KASSERT(!vm_page_lookup(object, pindex),
	    ("vm_page_alloc: page already allocated"));

	/*
	 * The pager is allowed to eat deeper into the free page list.
	 */

	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	s = splvm();

loop:
	if (cnt.v_free_count > cnt.v_free_reserved) {
		/*
		 * Allocate from the free queue if there are plenty of pages
		 * in it.
		 */
		if (page_req == VM_ALLOC_ZERO)
			m = vm_page_select_free(object, pindex, TRUE);
		else
			m = vm_page_select_free(object, pindex, FALSE);
	} else if (
	    (page_req == VM_ALLOC_SYSTEM &&
	     cnt.v_cache_count == 0 &&
	     cnt.v_free_count > cnt.v_interrupt_free_min) ||
	    (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)
	) {
		/*
		 * Interrupt or system, dig deeper into the free list.
		 */
		m = vm_page_select_free(object, pindex, FALSE);
	} else if (page_req != VM_ALLOC_INTERRUPT) {
		/*
		 * Allocatable from cache (non-interrupt only).  On success,
		 * we must free the page and try again, thus ensuring that
		 * cnt.v_*_free_min counters are replenished.
		 */
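		/*
		 * Editorial note (not part of the original source): the
		 * reclaimed cache page is freed below rather than returned
		 * directly; freeing moves it to a PQ_FREE queue and wakes
		 * any waiters, and the "goto loop" then satisfies this
		 * allocation from the free queue on the next pass.
		 */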
		m = vm_page_select_cache(object, pindex);
		if (m == NULL) {
			splx(s);
#if defined(DIAGNOSTIC)
			if (cnt.v_cache_count > 0)
				printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
			vm_pageout_deficit++;
			pagedaemon_wakeup();
			return (NULL);
		}
		KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
		vm_page_busy(m);
		vm_page_protect(m, VM_PROT_NONE);
		vm_page_free(m);
		goto loop;
	} else {
		/*
		 * Not allocatable from cache from interrupt, give up.
		 */
		splx(s);
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 * At this point we had better have found a good page.
	 */

	KASSERT(
	    m != NULL,
	    ("vm_page_alloc(): missing page on free queue\n")
	);

	/*
	 * Remove from free queue
	 */

	{
		struct vpgqueues *pq = &vm_page_queues[m->queue];

		TAILQ_REMOVE(pq->pl, m, pageq);
		(*pq->cnt)--;
		(*pq->lcnt)--;
	}

	/*
	 * Initialize structure.  Only the PG_ZERO flag is inherited.
	 */

	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		m->flags = PG_ZERO | PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}
	m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;
	m->dirty = 0;
	m->queue = PQ_NONE;

	/*
	 * vm_page_insert() is safe prior to the splx().  Note also that
	 * inserting a page here does not insert it into the pmap (which
	 * could cause us to block allocating memory).  We cannot block
	 * anywhere.
	 */

	vm_page_insert(m, object, pindex);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (((cnt.v_free_count + cnt.v_cache_count) <
	    (cnt.v_free_reserved + cnt.v_cache_min)) ||
	    (cnt.v_free_count < cnt.v_pageout_free_min))
		pagedaemon_wakeup();

	splx(s);

	return (m);
}

/*
 * vm_wait:	(also see VM_WAIT macro)
 *
 *	Block until free pages are available for allocation
 */

void
vm_wait()
{
	int s;

	s = splvm();
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		tsleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed++;
			wakeup(&vm_pages_needed);
		}
		tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
	}
	splx(s);
}

/*
 * vm_await:	(also see VM_AWAIT macro)
 *
 *	Asleep on an event that will signal when free pages are available
 *	for allocation.
 */

void
vm_await()
{
	int s;

	s = splvm();
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		asleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed++;
			wakeup(&vm_pages_needed);
		}
		asleep(&cnt.v_free_count, PVM, "vmwait", 0);
	}
	splx(s);
}
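
/*
 * Editorial note (not part of the original source): vm_wait() blocks
 * the caller immediately via tsleep(), while vm_await() only registers
 * interest with asleep(); the caller actually blocks later, when it
 * issues await().  Both routines use separate wait channels for the
 * pageout daemon (vm_pageout_pages_needed) and for ordinary processes
 * (cnt.v_free_count).
 */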

#if 0
/*
 * vm_page_sleep:
 *
 *	Block until page is no longer busy.
 */

int
vm_page_sleep(vm_page_t m, char *msg, char *busy)
{
	int slept = 0;

	if ((busy && *busy) || (m->flags & PG_BUSY)) {
		int s;

		s = splvm();
		if ((busy && *busy) || (m->flags & PG_BUSY)) {
			vm_page_flag_set(m, PG_WANTED);
			tsleep(m, PVM, msg, 0);
			slept = 1;
		}
		splx(s);
	}
	return (slept);
}

#endif

#if 0

/*
 * vm_page_asleep:
 *
 *	Similar to vm_page_sleep(), but does not block.  Returns 0 if
 *	the page is not busy, or 1 if the page is busy.
 *
 *	This routine has the side effect of calling asleep() if the page
 *	was busy (1 returned).
 */

int
vm_page_asleep(vm_page_t m, char *msg, char *busy)
{
	int slept = 0;

	if ((busy && *busy) || (m->flags & PG_BUSY)) {
		int s;

		s = splvm();
		if ((busy && *busy) || (m->flags & PG_BUSY)) {
			vm_page_flag_set(m, PG_WANTED);
			asleep(m, PVM, msg, 0);
			slept = 1;
		}
		splx(s);
	}
	return (slept);
}

#endif

/*
 * vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_activate(m)
	register vm_page_t m;
{
	int s;

	s = splvm();
	if (m->queue != PQ_ACTIVE) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;

		vm_page_unqueue(m);

		if (m->wire_count == 0) {
			m->queue = PQ_ACTIVE;
			++(*vm_page_queues[PQ_ACTIVE].lcnt);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			cnt.v_active_count++;
		}
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}

	splx(s);
}

/*
 * vm_page_free_wakeup:
 *
 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *	routine is called when a page has been added to the cache or free
 *	queues.
 *
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static __inline void
vm_page_free_wakeup()
{
	/*
	 * If the pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * Wakeup processes that are waiting on memory if we hit a
	 * high water mark, and wakeup the scheduler process if we have
	 * lots of memory; that process will swap in other processes.
	 */
	if (vm_pages_needed &&
	    ((cnt.v_free_count + cnt.v_cache_count) >= cnt.v_free_min)) {
		wakeup(&cnt.v_free_count);
		vm_pages_needed = 0;
	}
}

/*
 * vm_page_free_toq:
 *
 *	Returns the given page to the PQ_FREE list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 *	This routine may not block.
 */

void
vm_page_free_toq(vm_page_t m)
{
	int s;
	struct vpgqueues *pq;
	vm_object_t object = m->object;

	s = splvm();

	cnt.v_tfree++;

#if !defined(MAX_PERF)
	if (m->busy || ((m->queue - m->pc) == PQ_FREE) ||
	    (m->hold_count != 0)) {
		printf(
		    "vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
		    (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
		    m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}
#endif

	/*
	 * Unqueue, then remove page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */

	vm_page_unqueue_nowakeup(m);
	vm_page_remove(m);

	/*
	 * If fictitious, remove the object association and return;
	 * otherwise delay the object association removal.
	 */

	if ((m->flags & PG_FICTITIOUS) != 0) {
		splx(s);
		return;
	}

	m->valid = 0;

	if (m->wire_count != 0) {
#if !defined(MAX_PERF)
		if (m->wire_count > 1) {
			panic("vm_page_free: invalid wire count (%d), pindex: 0x%x",
			    m->wire_count, m->pindex);
		}
#endif
		printf("vm_page_free: freeing wired page\n");
		m->wire_count = 0;
		if (m->object)
			m->object->wire_count--;
		cnt.v_wire_count--;
	}

	/*
	 * If we've exhausted the object's resident pages we want to free
	 * it up.
	 */

	if (object &&
	    (object->type == OBJT_VNODE) &&
	    ((object->flags & OBJ_DEAD) == 0)
	) {
		struct vnode *vp = (struct vnode *)object->handle;

		if (vp && VSHOULDFREE(vp)) {
			if ((vp->v_flag & (VTBFREE|VDOOMED|VFREE)) == 0) {
				TAILQ_INSERT_TAIL(&vnode_tobefree_list, vp, v_freelist);
				vp->v_flag |= VTBFREE;
			}
		}
	}

#ifdef __alpha__
	pmap_page_is_free(m);
#endif

	m->queue = PQ_FREE + m->pc;
	pq = &vm_page_queues[m->queue];
	++(*pq->lcnt);
	++(*pq->cnt);

	/*
	 * Put zeroed pages at the end (where we look for zeroed pages
	 * first) and non-zeroed pages at the head.
	 */

	if (m->flags & PG_ZERO) {
		TAILQ_INSERT_TAIL(pq->pl, m, pageq);
		++vm_page_zero_count;
	} else if (curproc == pageproc) {
		/*
		 * If the pageout daemon is freeing pages, the pages are
		 * likely to NOT be in the L1 or L2 caches due to their age.
		 * For now we do not try to do anything special with this
		 * info.
		 */
		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	} else {
		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	}

	vm_page_free_wakeup();

	splx(s);
}

/*
 * vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_wire(m)
	register vm_page_t m;
{
	int s;

	s = splvm();
	if (m->wire_count == 0) {
		vm_page_unqueue(m);
		cnt.v_wire_count++;
		if (m->object)
			m->object->wire_count++;
	}
	m->wire_count++;
	splx(s);
	(*vm_page_queues[PQ_NONE].lcnt)++;
	vm_page_flag_set(m, PG_MAPPED);
}
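
/*
 * Editorial note (not part of the original source): wirings nest.  A
 * page wired twice with vm_page_wire() must be released twice with
 * vm_page_unwire() before it is requeued; only the 1 -> 0 transition
 * below moves the page back to the active or inactive queue and
 * adjusts cnt.v_wire_count.
 */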

/*
 * vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	Many pages placed on the inactive queue should actually go
 *	into the cache, but it is difficult to figure out which.  What
 *	we do instead, if the inactive target is well met, is to put
 *	clean pages at the head of the inactive queue instead of the tail.
 *	This will cause them to be moved to the cache more quickly and,
 *	if not actively re-referenced, freed more quickly.  If we just
 *	stick these pages at the end of the inactive queue, heavy filesystem
 *	meta-data accesses can cause an unnecessary paging load on memory-bound
 *	processes.  This optimization causes one-time-use metadata to be
 *	reused more quickly.
 *
 *	A number of routines use vm_page_unwire() to guarantee that the page
 *	will go into either the inactive or active queues, and will NEVER
 *	be placed in the cache - for example, just after dirtying a page.
 *	Dirty pages in the cache are not allowed.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_unwire(m, activate)
	register vm_page_t m;
	int activate;
{
	int s;

	s = splvm();

	if (m->wire_count > 0) {
		m->wire_count--;
		if (m->wire_count == 0) {
			if (m->object)
				m->object->wire_count--;
			cnt.v_wire_count--;
			if (activate) {
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				m->queue = PQ_ACTIVE;
				(*vm_page_queues[PQ_ACTIVE].lcnt)++;
				cnt.v_active_count++;
			} else {
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				m->queue = PQ_INACTIVE;
				(*vm_page_queues[PQ_INACTIVE].lcnt)++;
				cnt.v_inactive_count++;
			}
		}
	} else {
#if !defined(MAX_PERF)
		panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
#endif
	}
	splx(s);
}

/*
 * vm_page_deactivate:
 *
 *	Move the specified page to the inactive queue.  If the page has
 *	any associated swap, the swap is deallocated.
 *
 *	This routine may not block.
 */
void
vm_page_deactivate(m)
	register vm_page_t m;
{
	int s;

	/*
	 * Ignore if already inactive.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	s = splvm();
	if (m->wire_count == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;
		vm_page_unqueue(m);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->queue = PQ_INACTIVE;
		++(*vm_page_queues[PQ_INACTIVE].lcnt);
		cnt.v_inactive_count++;
	}
	splx(s);
}

/*
 * vm_page_cache:
 *
 *	Put the specified page onto the page cache queue (if appropriate).
 *
 *	This routine may not block.
 */
void
vm_page_cache(m)
	register vm_page_t m;
{
	int s;

#if !defined(MAX_PERF)
	if ((m->flags & PG_BUSY) || m->busy || m->wire_count) {
		printf("vm_page_cache: attempting to cache busy page\n");
		return;
	}
#endif
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	/*
	 * Remove all pmaps and indicate that the page is not
	 * writeable or mapped.
	 */

	vm_page_protect(m, VM_PROT_NONE);
#if !defined(MAX_PERF)
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %d", m->pindex);
	}
#endif
	s = splvm();
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	(*vm_page_queues[m->queue].lcnt)++;
	TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
	cnt.v_cache_count++;
	m->object->cache_count++;
	vm_page_free_wakeup();
	splx(s);
}
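
/*
 * Editorial sketch (not part of the original source): a typical caller
 * of vm_page_grab() below, with the object locked, looks like
 *
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *	... initialize or validate the page ...
 *	vm_page_wakeup(m);
 *
 * With VM_ALLOC_RETRY set, the call sleeps ("pgrbwt") while the page
 * is busy and retries after VM_WAIT on allocation failure, so it does
 * not return NULL.
 */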

/*
 * vm_page_grab:
 *
 *	Grab a page, waiting until we are woken up due to the page
 *	changing state.  We keep on waiting if the page continues
 *	to be busy in the object.  If the page doesn't exist, allocate it.
 *
 *	This routine may block.
 */
vm_page_t
vm_page_grab(object, pindex, allocflags)
	vm_object_t object;
	vm_pindex_t pindex;
	int allocflags;
{
	vm_page_t m;
	int s, generation;

retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (m->busy || (m->flags & PG_BUSY)) {
			generation = object->generation;

			s = splvm();
			while ((object->generation == generation) &&
			    (m->busy || (m->flags & PG_BUSY))) {
				vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
				tsleep(m, PVM, "pgrbwt", 0);
				if ((allocflags & VM_ALLOC_RETRY) == 0) {
					splx(s);
					return (NULL);
				}
			}
			splx(s);
			goto retrylookup;
		} else {
			vm_page_busy(m);
			return (m);
		}
	}

	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
	if (m == NULL) {
		VM_WAIT;
		if ((allocflags & VM_ALLOC_RETRY) == 0)
			return (NULL);
		goto retrylookup;
	}

	return (m);
}

/*
 * vm_page_bits:
 *
 *	Mapping function for valid bits or for dirty bits in
 *	a page.  May not block.
 *
 *	Inputs are required to range within a page.
 */

__inline int
vm_page_bits(int base, int size)
{
	int first_bit;
	int last_bit;

	KASSERT(
	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)
	);

	if (size == 0)		/* handle degenerate case */
		return (0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return ((2 << last_bit) - (1 << first_bit));
}

/*
 * vm_page_set_validclean:
 *
 *	Sets a page valid and clean.  May not block.
 *
 *	In order to maintain consistency due to the DEV_BSIZE granularity
 *	of the valid bits, we have to zero non-DEV_BSIZE-aligned portions of
 *	the page at the beginning and end of the valid range when the
 *	associated valid bits are not already set.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int pagebits;
	int frag;
	int endoff;

	if (size == 0)		/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */

	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    frag,
		    base - frag
		);
	}

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */

	endoff = base + size;

	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
		);
	}

	/*
	 * Set valid, clear dirty bits.  If validating the entire
	 * page we can safely clear the pmap modify bit.
	 */

	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
	m->dirty &= ~pagebits;

	if (base == 0 && size == PAGE_SIZE)
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}
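
/*
 * Editorial example (not part of the original source): with
 * DEV_BSIZE = 512 (DEV_BSHIFT = 9), vm_page_bits(512, 1024) yields
 * first_bit = 1 and last_bit = (512 + 1024 - 1) >> 9 = 2, so it
 * returns (2 << 2) - (1 << 1) = 6: the valid/dirty bits covering the
 * second and third 512-byte blocks of the page.
 */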

/*
 * vm_page_set_invalid:
 *
 *	Sets a page (partially) invalid.  May not block.
 */
void
vm_page_set_invalid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	m->valid &= ~(bits = vm_page_bits(base, size));
	if (m->valid == 0)
		m->dirty &= ~bits;
	m->object->generation++;
}

/*
 * vm_page_zero_invalid()
 *
 *	The kernel assumes that the invalid portions of a page contain
 *	garbage, but such pages can be mapped into memory by user code.
 *	When this occurs, we must zero out the non-valid portions of the
 *	page so user code sees what it expects.
 *
 *	Pages are most often semi-valid when the end of a file is mapped
 *	into memory and the file's size is not page aligned.
 */

void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
	int b;
	int i;

	/*
	 * Scan the valid bits looking for invalid sections that
	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
	 * valid bit may be set) have already been zeroed by
	 * vm_page_set_validclean().
	 */

	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & (1 << i))
		) {
			if (i > b) {
				pmap_zero_page_area(
				    VM_PAGE_TO_PHYS(m),
				    b << DEV_BSHIFT,
				    (i - b) << DEV_BSHIFT
				);
			}
			b = i + 1;
		}
	}

	/*
	 * setvalid is TRUE when we can safely set the zeroed areas
	 * as being valid.  We can do this if there are no cache consistency
	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
	 */

	if (setvalid)
		m->valid = VM_PAGE_BITS_ALL;
}

/*
 * vm_page_is_valid:
 *
 *	Is (partial) page valid?  Note that the case where size == 0
 *	will return FALSE in the degenerate case where the page is
 *	entirely invalid, and TRUE otherwise.
 *
 *	May not block.
 */

int
vm_page_is_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

/*
 * vm_page_test_dirty:
 *
 *	Update dirty bits from pmap/mmu.  May not block.
 */

void
vm_page_test_dirty(m)
	vm_page_t m;
{
	if ((m->dirty != VM_PAGE_BITS_ALL) &&
	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		vm_page_dirty(m);
	}
}
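
/*
 * Editorial example (not part of the original source): with a 4K page
 * and DEV_BSIZE = 512, a page whose valid mask is 0x0f (first four
 * blocks valid) causes the scan in vm_page_zero_invalid() to issue a
 * single pmap_zero_page_area() call zeroing 2048 bytes starting at
 * offset 2048.
 */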

/*
 * contigmalloc1:
 *
 *	This interface is for merging with malloc() someday.
 *	Even if we never implement compaction so that contiguous allocation
 *	works after initialization time, malloc()'s data structures are good
 *	for statistics and for allocations of less than a page.
 */
void *
contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
	unsigned long size;	/* should be size_t here and for malloc() */
	struct malloc_type *type;
	int flags;
	unsigned long low;
	unsigned long high;
	unsigned long alignment;
	unsigned long boundary;
	vm_map_t map;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	int pass;
	vm_page_t pga = vm_page_array;

	size = round_page(size);
#if !defined(MAX_PERF)
	if (size == 0)
		panic("contigmalloc1: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("contigmalloc1: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("contigmalloc1: boundary must be a power of 2");
#endif

	start = 0;
	for (pass = 0; pass <= 1; pass++) {
		s = splvm();
again:
		/*
		 * Find the first page in the array that is free, within
		 * range, aligned, and such that the boundary won't be
		 * crossed.
		 */
		for (i = start; i < cnt.v_page_count; i++) {
			int pqtype;

			phys = VM_PAGE_TO_PHYS(&pga[i]);
			pqtype = pga[i].queue - pga[i].pc;
			if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
			    (phys >= low) && (phys < high) &&
			    ((phys & (alignment - 1)) == 0) &&
			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
				break;
		}

		/*
		 * If the above failed or we will exceed the upper bound, fail.
		 */
		if ((i == cnt.v_page_count) ||
		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
			vm_page_t m, next;

again1:
			for (m = TAILQ_FIRST(&vm_page_queue_inactive);
			    m != NULL;
			    m = next) {

				if (m->queue != PQ_INACTIVE) {
					break;
				}

				next = TAILQ_NEXT(m, pageq);
				if (vm_page_sleep_busy(m, TRUE, "vpctw0"))
					goto again1;
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
						vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
						VOP_UNLOCK(m->object->handle, 0, curproc);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
					    m->object->type == OBJT_DEFAULT) {
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
					vm_page_cache(m);
			}

			for (m = TAILQ_FIRST(&vm_page_queue_active);
			    m != NULL;
			    m = next) {

				if (m->queue != PQ_ACTIVE) {
					break;
				}

				next = TAILQ_NEXT(m, pageq);
				if (vm_page_sleep_busy(m, TRUE, "vpctw1"))
					goto again1;
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
						vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
						VOP_UNLOCK(m->object->handle, 0, curproc);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
					    m->object->type == OBJT_DEFAULT) {
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
					vm_page_cache(m);
			}

			splx(s);
			continue;
		}
		start = i;

		/*
		 * Check successive pages for contiguous and free.
		 */
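		/*
		 * Editorial note (not part of the original source): the scan
		 * below requires VM_PAGE_TO_PHYS() of each successive
		 * vm_page_array entry to be exactly PAGE_SIZE beyond its
		 * predecessor, so a candidate run that straddles a hole
		 * between phys_avail[] segments is rejected along with runs
		 * containing allocated pages.
		 */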
		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
			int pqtype;

			pqtype = pga[i].queue - pga[i].pc;
			if ((VM_PAGE_TO_PHYS(&pga[i]) !=
			    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
			    ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
				start++;
				goto again;
			}
		}

		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			int pqtype;
			vm_page_t m = &pga[i];

			pqtype = m->queue - m->pc;
			if (pqtype == PQ_CACHE) {
				vm_page_busy(m);
				vm_page_free(m);
			}

			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
			(*vm_page_queues[m->queue].lcnt)--;
			cnt.v_free_count--;
			m->valid = VM_PAGE_BITS_ALL;
			m->flags = 0;
			m->dirty = 0;
			m->wire_count = 0;
			m->busy = 0;
			m->queue = PQ_NONE;
			m->object = NULL;
			vm_page_wire(m);
		}

		/*
		 * We've found a contiguous chunk that meets our requirements.
		 * Allocate kernel VM, unfree and assign the physical pages to
		 * it, and return the kernel VM pointer.
		 */
		tmp_addr = addr = kmem_alloc_pageable(map, size);
		if (addr == 0) {
			/*
			 * XXX We almost never run out of kernel virtual
			 * space, so we don't make the allocated memory
			 * above available.
			 */
			splx(s);
			return (NULL);
		}

		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			vm_page_t m = &pga[i];

			vm_page_insert(m, kernel_object,
			    OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
			pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
			tmp_addr += PAGE_SIZE;
		}

		splx(s);
		return ((void *)addr);
	}
	return (NULL);
}

void *
contigmalloc(size, type, flags, low, high, alignment, boundary)
	unsigned long size;	/* should be size_t here and for malloc() */
	struct malloc_type *type;
	int flags;
	unsigned long low;
	unsigned long high;
	unsigned long alignment;
	unsigned long boundary;
{
	return contigmalloc1(size, type, flags, low, high, alignment, boundary,
	    kernel_map);
}

vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
	vm_offset_t size;
	vm_offset_t low;
	vm_offset_t high;
	vm_offset_t alignment;
{
	return ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
	    alignment, 0ul, kernel_map));
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;

	db_printf("PQ_FREE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_FREE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_CACHE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_CACHE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
	    *vm_page_queues[PQ_ACTIVE].lcnt,
	    *vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */
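
/*
 * Editorial note (not part of the original source): with DDB compiled
 * in, the two commands above are invoked from the debugger prompt as
 * "show page" and "show pageq", dumping the global page counters and
 * the per-color free/cache queue lengths respectively.
 */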