vm_page.c revision 44880
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.127 1999/02/24 21:26:26 dillon Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

static void vm_page_queue_init __P((void));
static vm_page_t vm_page_select_cache __P((vm_object_t, vm_pindex_t));

/*
 * Associated with page of user-allocatable memory is a
 * page structure.
 */

static struct vm_page **vm_page_buckets; /* Array of buckets */
static int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */
static volatile int vm_page_bucket_generation;

struct pglist vm_page_queue_free[PQ_L2_SIZE] = {{0}};
struct pglist vm_page_queue_active = {0};
struct pglist vm_page_queue_inactive = {0};
struct pglist vm_page_queue_cache[PQ_L2_SIZE] = {{0}};

static int no_queue = 0;

struct vpgqueues vm_page_queues[PQ_COUNT] = {{0}};
static int pqcnt[PQ_COUNT] = {0};

static void
vm_page_queue_init(void) {
	int i;

	vm_page_queues[PQ_NONE].pl = NULL;
	vm_page_queues[PQ_NONE].cnt = &no_queue;
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_FREE+i].pl = &vm_page_queue_free[i];
		vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
	}
	vm_page_queues[PQ_INACTIVE].pl = &vm_page_queue_inactive;
	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;

	vm_page_queues[PQ_ACTIVE].pl = &vm_page_queue_active;
	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_CACHE+i].pl = &vm_page_queue_cache[i];
		vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
	}
	for (i = 0; i < PQ_COUNT; i++) {
		if (vm_page_queues[i].pl) {
			TAILQ_INIT(vm_page_queues[i].pl);
		} else if (i != 0) {
			panic("vm_page_queue_init: queue %d is null", i);
		}
		vm_page_queues[i].lcnt = &pqcnt[i];
	}
}

vm_page_t vm_page_array = 0;
static int vm_page_array_size = 0;
long first_page = 0;
static long last_page;
static vm_size_t page_mask;
static int page_shift;
int vm_page_zero_count = 0;

/*
 * map of contiguous valid DEV_BSIZE chunks in a page
 * (this list is valid for page sizes up to 16*DEV_BSIZE)
 */
static u_short vm_page_dev_bsize_chunks[] = {
	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};

static __inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex));
static void vm_page_free_wakeup __P((void));

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void
vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0;; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}
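
/*
 * For the usual case of cnt.v_page_size == 4096 (an assumption made here
 * purely for illustration, not something this file requires),
 * vm_set_page_size() leaves page_shift == 12 and page_mask == 0xfff, so a
 * physical address splits into a frame number and a byte offset:
 *
 *	pa >> page_shift	frame number	(0x12345678 -> 0x12345)
 *	pa & page_mask		byte offset	(0x12345678 -> 0x678)
 */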

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t starta;
	vm_offset_t enda;
	register vm_offset_t vaddr;
{
	register vm_offset_t mapped;
	register vm_page_t m;
	register struct vm_page **bucket;
	vm_size_t npages, page_range;
	register vm_offset_t new_start;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t first_managed_page;

	/* the biggest memory array is the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */

	vm_page_queue_init();

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * We make the hash table approximately 2x the number of pages to
	 * reduce the chain length.  This is about the same size using the
	 * singly-linked list as the 1x hash table we were using before
	 * using TAILQ but the chain length will be smaller.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct vm_page **)vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_bucket_count <<= 1;
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct vm_page *);
	new_start = round_page(new_start);
	mapped = round_page(vaddr);
	vaddr = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;
	vaddr = round_page(vaddr);
	bzero((caddr_t) mapped, vaddr - mapped);

	for (i = 0; i < vm_page_bucket_count; i++) {
		*bucket = NULL;
		bucket++;
	}

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */

	first_page = phys_avail[0] / PAGE_SIZE;
	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;

	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (start - phys_avail[biggestone])) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */
	new_start = round_page(start + page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	/*
	 * Construct the free queue(s) in descending order (by physical
	 * address) so that the first 16MB of physical memory is allocated
	 * last rather than first.  On large-memory machines, this avoids
	 * the exhaustion of low physical memory before isa_dmainit has run.
	 */
	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->phys_addr = pa;
			m->flags = 0;
			m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
			m->queue = m->pc + PQ_FREE;
			TAILQ_INSERT_HEAD(vm_page_queues[m->queue].pl, m, pageq);
			++(*vm_page_queues[m->queue].lcnt);
			pa += PAGE_SIZE;
		}
	}
	return (mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 *	This routine may not block.
 *
 *	We try to randomize the hash based on the object to spread the pages
 *	out in the hash table without it costing us too much.
 */
static __inline int
vm_page_hash(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	int i = ((uintptr_t)object + pindex) ^ object->hash_rand;

	return(i & vm_page_hash_mask);
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The pagetables are not updated but will presumably fault the page
 *	in if necessary, or if a kernel page the caller will at some point
 *	enter the page into the kernel's pmap.  We are not allowed to block
 *	here so we *can't* do this anyway.
 *
 *	The object and page must be locked, and must be splhigh.
 *	This routine may not block.
 */

void
vm_page_insert(m, object, pindex)
	register vm_page_t m;
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register struct vm_page **bucket;

	if (m->object != NULL)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */

	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	m->hnext = *bucket;
	*bucket = m;
	vm_page_bucket_generation++;

	/*
	 * Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, m, listq);
	m->object->generation++;

	if (m->wire_count)
		object->wire_count++;

	if ((m->queue - m->pc) == PQ_CACHE)
		object->cache_count++;

	/*
	 * show that the object has one more resident page.
	 */

	object->resident_page_count++;

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
	 */
	if (m->flags & PG_WRITEABLE)
		vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
}
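
/*
 * The hash chains built by vm_page_insert() above are the ones that
 * vm_page_lookup() walks; vm_page_rename() further down is the in-file
 * example of the remove/insert pair in use:
 *
 *	s = splvm();
 *	vm_page_remove(m);
 *	vm_page_insert(m, new_object, new_pindex);
 *	splx(s);
 *
 * Every insert or remove bumps vm_page_bucket_generation, and
 * vm_page_lookup() re-checks that counter after walking a chain, retrying
 * if it changed.  That is what lets the unlocked lookup tolerate a
 * concurrent interrupt-time update (though, as noted there, not true SMP
 * concurrency).
 */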

/*
 *	vm_page_remove:
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list, but do not invalidate/terminate
 *	the backing store.
 *
 *	The object and page must be locked, and at splhigh.
 *	The underlying pmap entry (if any) is NOT removed here.
 *	This routine may not block.
 */

void
vm_page_remove(m)
	vm_page_t m;
{
	vm_object_t object;

	if (m->object == NULL)
		return;

#if !defined(MAX_PERF)
	if ((m->flags & PG_BUSY) == 0) {
		panic("vm_page_remove: page not busy");
	}
#endif

	/*
	 * Basically destroy the page.
	 */

	vm_page_wakeup(m);

	object = m->object;

	if (m->wire_count)
		object->wire_count--;

	if ((m->queue - m->pc) == PQ_CACHE)
		object->cache_count--;

	/*
	 * Remove from the object_object/offset hash table.  The object
	 * must be on the hash queue, we will panic if it isn't
	 *
	 * Note: we must NULL-out m->hnext to prevent loops in detached
	 * buffers with vm_page_lookup().
	 */

	{
		struct vm_page **bucket;

		bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
		while (*bucket != m) {
#if !defined(MAX_PERF)
			if (*bucket == NULL)
				panic("vm_page_remove(): page not found in hash");
#endif
			bucket = &(*bucket)->hnext;
		}
		*bucket = m->hnext;
		m->hnext = NULL;
		vm_page_bucket_generation++;
	}

	/*
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	object->resident_page_count--;
	object->generation++;

	m->object = NULL;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	NOTE: the code below does not lock.  It will operate properly if
 *	an interrupt makes a change, but the generation algorithm will not
 *	operate properly in an SMP environment where both cpu's are able to run
 *	kernel code simultaneously.
 *
 *	The object must be locked.  No side effects.
 *	This routine may not block.
 *	This is a critical path routine
 */

vm_page_t
vm_page_lookup(object, pindex)
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register vm_page_t m;
	register struct vm_page **bucket;
	int generation;

	/*
	 * Search the hash table for this object/offset pair
	 */

retry:
	generation = vm_page_bucket_generation;
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	for (m = *bucket; m != NULL; m = m->hnext) {
		if ((m->object == object) && (m->pindex == pindex)) {
			if (vm_page_bucket_generation != generation)
				goto retry;
			return (m);
		}
	}
	if (vm_page_bucket_generation != generation)
		goto retry;
	return (NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 *	This routine may not block.
 *
 *	Note: this routine will raise itself to splvm(), the caller need not.
 *
 *	Note: swap associated with the page must be invalidated by the move.
 *	We have to do this for several reasons: (1) we aren't freeing the
 *	page, (2) we are dirtying the page, (3) the VM system is probably
 *	moving the page from object A to B, and will then later move
 *	the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	fact that we moved it, and because we may be invalidating
 *	swap.  If the page is on the cache, we have to deactivate it
 *	or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	on the cache.
 */

void
vm_page_rename(m, new_object, new_pindex)
	register vm_page_t m;
	register vm_object_t new_object;
	vm_pindex_t new_pindex;
{
	int s;

	s = splvm();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	if (m->queue - m->pc == PQ_CACHE)
		vm_page_deactivate(m);
	vm_page_dirty(m);
	splx(s);
}

/*
 *	vm_page_unqueue_nowakeup:
 *
 *	vm_page_unqueue() without any wakeup
 *
 *	This routine must be called at splhigh().
 *	This routine may not block.
 */

void
vm_page_unqueue_nowakeup(m)
	vm_page_t m;
{
	int queue = m->queue;
	struct vpgqueues *pq;
	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(pq->pl, m, pageq);
		(*pq->cnt)--;
		(*pq->lcnt)--;
		if ((queue - m->pc) == PQ_CACHE) {
			if (m->object)
				m->object->cache_count--;
		}
	}
}

/*
 *	vm_page_unqueue:
 *
 *	Remove a page from its queue.
 *
 *	This routine must be called at splhigh().
 *	This routine may not block.
 */

void
vm_page_unqueue(m)
	vm_page_t m;
{
	int queue = m->queue;
	struct vpgqueues *pq;
	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(pq->pl, m, pageq);
		(*pq->cnt)--;
		(*pq->lcnt)--;
		if ((queue - m->pc) == PQ_CACHE) {
			if ((cnt.v_cache_count + cnt.v_free_count) <
			    (cnt.v_free_reserved + cnt.v_cache_min))
				pagedaemon_wakeup();
			if (m->object)
				m->object->cache_count--;
		}
	}
}

#if PQ_L2_SIZE > 1

/*
 *	vm_page_list_find:
 *
 *	Find a page on the specified queue with color optimization.
 *
 *	The page coloring optimization attempts to locate a page
 *	that does not overload other nearby pages in the object in
 *	the cpu's L1 or L2 caches.  We need this optimization because
 *	cpu caches tend to be physical caches, while object spaces tend
 *	to be virtual.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 *
 *	This routine may only be called from the vm_page_list_find() macro
 *	in vm_page.h
 */
vm_page_t
_vm_page_list_find(basequeue, index)
	int basequeue, index;
{
	int i;
	vm_page_t m = NULL;
	struct vpgqueues *pq;

	pq = &vm_page_queues[basequeue];

	/*
	 * Note that for the first loop, index+i and index-i wind up at the
	 * same place.  Even though this is not totally optimal, we've already
	 * blown it by missing the cache case so we do not care.
	 */

	for (i = PQ_L2_SIZE / 2; i > 0; --i) {
		if ((m = TAILQ_FIRST(pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
			break;

		if ((m = TAILQ_FIRST(pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
			break;
	}
	return(m);
}

#endif
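
/*
 * To make the search order above concrete, assume (purely for illustration)
 * PQ_L2_SIZE == 16, PQ_L2_MASK == 15 and index == 5.  The exact color 5 is
 * expected to have been tried already by the wrapping vm_page_list_find()
 * macro ("we've already blown it by missing the cache case"), so the loop
 * probes the remaining sub-queues in this order:
 *
 *	i=8: 13,13   i=7: 12,14   i=6: 11,15   i=5: 10,0
 *	i=4:  9,1    i=3:  8,2    i=2:  7,3    i=1:  6,4
 *
 * returning the first non-empty sub-queue found, i.e. colors farthest
 * (mod PQ_L2_SIZE) from the requested one are tried first and the
 * neighboring colors last.
 */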

/*
 *	vm_page_select_cache:
 *
 *	Find a page on the cache queue with color optimization.  As pages
 *	might be found, but not applicable, they are deactivated.  This
 *	keeps us from using potentially busy cached pages.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 */
vm_page_t
vm_page_select_cache(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	vm_page_t m;

	while (TRUE) {
		m = vm_page_list_find(
		    PQ_CACHE,
		    (pindex + object->pg_color) & PQ_L2_MASK,
		    FALSE
		);
		if (m && ((m->flags & PG_BUSY) || m->busy ||
		    m->hold_count || m->wire_count)) {
			vm_page_deactivate(m);
			continue;
		}
		return m;
	}
}

/*
 *	vm_page_select_free:
 *
 *	Find a free or zero page, with specified preference.  We attempt to
 *	inline the nominal case and fall back to _vm_page_select_free()
 *	otherwise.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 */

static __inline vm_page_t
vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
{
	vm_page_t m;

	m = vm_page_list_find(
	    PQ_FREE,
	    (pindex + object->pg_color) & PQ_L2_MASK,
	    prefer_zero
	);
	return(m);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	VM_ALLOC_ZERO		zero page
 *
 *	Object must be locked.
 *	This routine may not block.
 *
 *	Additional special handling is required when called from an
 *	interrupt (VM_ALLOC_INTERRUPT).  We are not allowed to mess with
 *	the page cache in this case.
 */

vm_page_t
vm_page_alloc(object, pindex, page_req)
	vm_object_t object;
	vm_pindex_t pindex;
	int page_req;
{
	register vm_page_t m = NULL;
	int s;

	KASSERT(!vm_page_lookup(object, pindex),
	    ("vm_page_alloc: page already allocated"));

	/*
	 * The pager is allowed to eat deeper into the free page list.
	 */

	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	};

	s = splvm();

loop:
	if (cnt.v_free_count > cnt.v_free_reserved) {
		/*
		 * Allocate from the free queue if there are plenty of pages
		 * in it.
		 */
		if (page_req == VM_ALLOC_ZERO)
			m = vm_page_select_free(object, pindex, TRUE);
		else
			m = vm_page_select_free(object, pindex, FALSE);
	} else if (
	    (page_req == VM_ALLOC_SYSTEM &&
	     cnt.v_cache_count == 0 &&
	     cnt.v_free_count > cnt.v_interrupt_free_min) ||
	    (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)
	) {
		/*
		 * Interrupt or system, dig deeper into the free list.
		 */
		m = vm_page_select_free(object, pindex, FALSE);
	} else if (page_req != VM_ALLOC_INTERRUPT) {
		/*
		 * Allocatable from cache (non-interrupt only).  On success,
		 * we must free the page and try again, thus ensuring that
		 * cnt.v_*_free_min counters are replenished.
		 */
		m = vm_page_select_cache(object, pindex);
		if (m == NULL) {
			splx(s);
#if defined(DIAGNOSTIC)
			if (cnt.v_cache_count > 0)
				printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
			vm_pageout_deficit++;
			pagedaemon_wakeup();
			return (NULL);
		}
		KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
		vm_page_busy(m);
		vm_page_protect(m, VM_PROT_NONE);
		vm_page_free(m);
		goto loop;
	} else {
		/*
		 * Not allocatable from cache from interrupt, give up.
		 */
		splx(s);
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 * At this point we had better have found a good page.
	 */

	KASSERT(
	    m != NULL,
	    ("vm_page_alloc(): missing page on free queue\n")
	);

	/*
	 * Remove from free queue
	 */

	{
		struct vpgqueues *pq = &vm_page_queues[m->queue];

		TAILQ_REMOVE(pq->pl, m, pageq);
		(*pq->cnt)--;
		(*pq->lcnt)--;
	}

	/*
	 * Initialize structure.  Only the PG_ZERO flag is inherited.
	 */

	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		m->flags = PG_ZERO | PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}
	m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;
	m->dirty = 0;
	m->queue = PQ_NONE;

	/*
	 * vm_page_insert() is safe prior to the splx().  Note also that
	 * inserting a page here does not insert it into the pmap (which
	 * could cause us to block allocating memory).  We cannot block
	 * anywhere.
	 */

	vm_page_insert(m, object, pindex);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (((cnt.v_free_count + cnt.v_cache_count) <
	    (cnt.v_free_reserved + cnt.v_cache_min)) ||
	    (cnt.v_free_count < cnt.v_pageout_free_min))
		pagedaemon_wakeup();

	splx(s);

	return (m);
}

/*
 *	vm_wait:	(also see VM_WAIT macro)
 *
 *	Block until free pages are available for allocation
 */

void
vm_wait()
{
	int s;

	s = splvm();
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		tsleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed++;
			wakeup(&vm_pages_needed);
		}
		tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
	}
	splx(s);
}

/*
 *	vm_await:	(also see VM_AWAIT macro)
 *
 *	asleep on an event that will signal when free pages are available
 *	for allocation.
 */

void
vm_await()
{
	int s;

	s = splvm();
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		asleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed++;
			wakeup(&vm_pages_needed);
		}
		asleep(&cnt.v_free_count, PVM, "vmwait", 0);
	}
	splx(s);
}

#if 0
/*
 *	vm_page_sleep:
 *
 *	Block until page is no longer busy.
 */

int
vm_page_sleep(vm_page_t m, char *msg, char *busy) {
	int slept = 0;
	if ((busy && *busy) || (m->flags & PG_BUSY)) {
		int s;
		s = splvm();
		if ((busy && *busy) || (m->flags & PG_BUSY)) {
			vm_page_flag_set(m, PG_WANTED);
			tsleep(m, PVM, msg, 0);
			slept = 1;
		}
		splx(s);
	}
	return slept;
}

#endif

#if 0

/*
 *	vm_page_asleep:
 *
 *	Similar to vm_page_sleep(), but does not block.
 *	Returns 0 if the page is not busy, or 1 if the page is busy.
 *
 *	This routine has the side effect of calling asleep() if the page
 *	was busy (1 returned).
 */

int
vm_page_asleep(vm_page_t m, char *msg, char *busy) {
	int slept = 0;
	if ((busy && *busy) || (m->flags & PG_BUSY)) {
		int s;
		s = splvm();
		if ((busy && *busy) || (m->flags & PG_BUSY)) {
			vm_page_flag_set(m, PG_WANTED);
			asleep(m, PVM, msg, 0);
			slept = 1;
		}
		splx(s);
	}
	return slept;
}

#endif

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_activate(m)
	register vm_page_t m;
{
	int s;

	s = splvm();
	if (m->queue != PQ_ACTIVE) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;

		vm_page_unqueue(m);

		if (m->wire_count == 0) {
			m->queue = PQ_ACTIVE;
			++(*vm_page_queues[PQ_ACTIVE].lcnt);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			cnt.v_active_count++;
		}
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}

	splx(s);
}

/*
 *	vm_page_free_wakeup:
 *
 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *	routine is called when a page has been added to the cache or free
 *	queues.
 *
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static __inline void
vm_page_free_wakeup()
{
	/*
	 * if pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * wakeup processes that are waiting on memory if we hit a
	 * high water mark.  And wakeup scheduler process if we have
	 * lots of memory.  this process will swapin processes.
	 */
	if (vm_pages_needed &&
	    ((cnt.v_free_count + cnt.v_cache_count) >= cnt.v_free_min)) {
		wakeup(&cnt.v_free_count);
		vm_pages_needed = 0;
	}
}

/*
 *	vm_page_free_toq:
 *
 *	Returns the given page to the PQ_FREE or PQ_ZERO list,
 *	disassociating it with any VM object.
 *
 *	Object and page must be locked prior to entry.
 *	This routine may not block.
 */

void
vm_page_free_toq(vm_page_t m)
{
	int s;
	struct vpgqueues *pq;
	vm_object_t object = m->object;

	s = splvm();

	cnt.v_tfree++;

#if !defined(MAX_PERF)
	if (m->busy || ((m->queue - m->pc) == PQ_FREE) ||
	    (m->hold_count != 0)) {
		printf(
		    "vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
		    (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
		    m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}
#endif

	/*
	 * unqueue, then remove page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */

	vm_page_unqueue_nowakeup(m);
	vm_page_remove(m);

	/*
	 * If fictitious remove object association and
	 * return, otherwise delay object association removal.
	 */

	if ((m->flags & PG_FICTITIOUS) != 0) {
		splx(s);
		return;
	}

	m->valid = 0;

	if (m->wire_count != 0) {
#if !defined(MAX_PERF)
		if (m->wire_count > 1) {
			panic("vm_page_free: invalid wire count (%d), pindex: 0x%x",
			    m->wire_count, m->pindex);
		}
#endif
		printf("vm_page_free: freeing wired page\n");
		m->wire_count = 0;
		if (m->object)
			m->object->wire_count--;
		cnt.v_wire_count--;
	}

	/*
	 * If we've exhausted the object's resident pages we want to free
	 * it up.
	 */

	if (object &&
	    (object->type == OBJT_VNODE) &&
	    ((object->flags & OBJ_DEAD) == 0)
	) {
		struct vnode *vp = (struct vnode *)object->handle;

		if (vp && VSHOULDFREE(vp)) {
			if ((vp->v_flag & (VTBFREE|VDOOMED|VFREE)) == 0) {
				TAILQ_INSERT_TAIL(&vnode_tobefree_list, vp, v_freelist);
				vp->v_flag |= VTBFREE;
			}
		}
	}

#ifdef __alpha__
	pmap_page_is_free(m);
#endif

	m->queue = PQ_FREE + m->pc;
	pq = &vm_page_queues[m->queue];
	++(*pq->lcnt);
	++(*pq->cnt);

	/*
	 * Put zero'd pages on the end ( where we look for zero'd pages
	 * first ) and non-zero'd pages at the head.
	 */

	if (m->flags & PG_ZERO) {
		TAILQ_INSERT_TAIL(pq->pl, m, pageq);
		++vm_page_zero_count;
	} else if (curproc == pageproc) {
		/*
		 * If the pageout daemon is freeing pages, the pages are
		 * likely to NOT be in the L1 or L2 caches due to their age.
		 * For now we do not try to do anything special with this
		 * info.
		 */
		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	} else {
		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	}

	vm_page_free_wakeup();

	splx(s);
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_wire(m)
	register vm_page_t m;
{
	int s;

	s = splvm();
	if (m->wire_count == 0) {
		vm_page_unqueue(m);
		cnt.v_wire_count++;
		if (m->object)
			m->object->wire_count++;
	}
	m->wire_count++;
	splx(s);
	(*vm_page_queues[PQ_NONE].lcnt)++;
	vm_page_flag_set(m, PG_MAPPED);
}
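
/*
 * Wiring is a counted operation: every vm_page_wire() call must eventually
 * be balanced by a vm_page_unwire().  A caller that needs a page pinned
 * across an operation might, for example, do:
 *
 *	vm_page_wire(m);
 *	...work on the page knowing it cannot be paged out...
 *	vm_page_unwire(m, 1);		(1 requeues it on the active list)
 *
 * Only the 0 -> 1 and 1 -> 0 transitions of wire_count move the page off
 * and back onto the paging queues; nested wirings merely adjust the count.
 */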

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	Many pages placed on the inactive queue should actually go
 *	into the cache, but it is difficult to figure out which.  What
 *	we do instead, if the inactive target is well met, is to put
 *	clean pages at the head of the inactive queue instead of the tail.
 *	This will cause them to be moved to the cache more quickly and
 *	if not actively re-referenced, freed more quickly.  If we just
 *	stick these pages at the end of the inactive queue, heavy filesystem
 *	meta-data accesses can cause an unnecessary paging load on memory bound
 *	processes.  This optimization causes one-time-use metadata to be
 *	reused more quickly.
 *
 *	A number of routines use vm_page_unwire() to guarantee that the page
 *	will go into either the inactive or active queues, and will NEVER
 *	be placed in the cache - for example, just after dirtying a page.
 *	dirty pages in the cache are not allowed.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_unwire(m, activate)
	register vm_page_t m;
	int activate;
{
	int s;

	s = splvm();

	if (m->wire_count > 0) {
		m->wire_count--;
		if (m->wire_count == 0) {
			if (m->object)
				m->object->wire_count--;
			cnt.v_wire_count--;
			if (activate) {
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				m->queue = PQ_ACTIVE;
				(*vm_page_queues[PQ_ACTIVE].lcnt)++;
				cnt.v_active_count++;
			} else {
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				m->queue = PQ_INACTIVE;
				(*vm_page_queues[PQ_INACTIVE].lcnt)++;
				cnt.v_inactive_count++;
			}
		}
	} else {
#if !defined(MAX_PERF)
		panic("vm_page_unwire: invalid wire count: %d\n", m->wire_count);
#endif
	}
	splx(s);
}


/*
 * Move the specified page to the inactive queue.  If the page has
 * any associated swap, the swap is deallocated.
 *
 * This routine may not block.
 */
void
vm_page_deactivate(m)
	register vm_page_t m;
{
	int s;

	/*
	 * Ignore if already inactive.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	s = splvm();
	if (m->wire_count == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;
		vm_page_unqueue(m);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->queue = PQ_INACTIVE;
		++(*vm_page_queues[PQ_INACTIVE].lcnt);
		cnt.v_inactive_count++;
	}
	splx(s);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 *
 * This routine may not block.
 */
void
vm_page_cache(m)
	register vm_page_t m;
{
	int s;

#if !defined(MAX_PERF)
	if ((m->flags & PG_BUSY) || m->busy || m->wire_count) {
		printf("vm_page_cache: attempting to cache busy page\n");
		return;
	}
#endif
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	/*
	 * Remove all pmaps and indicate that the page is not
	 * writeable or mapped.
	 */

	vm_page_protect(m, VM_PROT_NONE);
#if !defined(MAX_PERF)
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %d", m->pindex);
	}
#endif
	s = splvm();
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	(*vm_page_queues[m->queue].lcnt)++;
	TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
	cnt.v_cache_count++;
	m->object->cache_count++;
	vm_page_free_wakeup();
	splx(s);
}

/*
 * Grab a page, waiting until we are woken up due to the page
 * changing state.  We keep on waiting, if the page continues
 * to be in the object.  If the page doesn't exist, allocate it.
 *
 * This routine may block.
 */
vm_page_t
vm_page_grab(object, pindex, allocflags)
	vm_object_t object;
	vm_pindex_t pindex;
	int allocflags;
{

	vm_page_t m;
	int s, generation;

retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (m->busy || (m->flags & PG_BUSY)) {
			generation = object->generation;

			s = splvm();
			while ((object->generation == generation) &&
			    (m->busy || (m->flags & PG_BUSY))) {
				vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
				tsleep(m, PVM, "pgrbwt", 0);
				if ((allocflags & VM_ALLOC_RETRY) == 0) {
					splx(s);
					return NULL;
				}
			}
			splx(s);
			goto retrylookup;
		} else {
			vm_page_busy(m);
			return m;
		}
	}

	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
	if (m == NULL) {
		VM_WAIT;
		if ((allocflags & VM_ALLOC_RETRY) == 0)
			return NULL;
		goto retrylookup;
	}

	return m;
}

/*
 * mapping function for valid bits or for dirty bits in
 * a page.  May not block.
 */
__inline int
vm_page_bits(int base, int size)
{
	u_short chunk;

	if ((base == 0) && (size >= PAGE_SIZE))
		return VM_PAGE_BITS_ALL;

	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	base &= PAGE_MASK;
	if (size > PAGE_SIZE - base) {
		size = PAGE_SIZE - base;
	}

	base = base / DEV_BSIZE;
	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
	return (chunk << base) & VM_PAGE_BITS_ALL;
}

/*
 * set a page valid and clean.  May not block.
 */
void
vm_page_set_validclean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
	m->dirty &= ~pagebits;
	if( base == 0 && size == PAGE_SIZE)
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}

/*
 * set a page (partially) invalid.  May not block.
 */
void
vm_page_set_invalid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	m->valid &= ~(bits = vm_page_bits(base, size));
	if (m->valid == 0)
		m->dirty &= ~bits;
	m->object->generation++;
}

/*
 * is (partial) page valid?  May not block.
 */
int
vm_page_is_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

/*
 * update dirty bits from pmap/mmu.  May not block.
 */

void
vm_page_test_dirty(m)
	vm_page_t m;
{
	if ((m->dirty != VM_PAGE_BITS_ALL) &&
	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		vm_page_dirty(m);
	}
}
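
/*
 * A worked example of vm_page_bits(), assuming PAGE_SIZE == 4096 and
 * DEV_BSIZE == 512 (so a page holds 8 DEV_BSIZE chunks and the valid/dirty
 * masks use the low 8 bits):
 *
 *	vm_page_bits(0, 4096)    -> VM_PAGE_BITS_ALL	(whole page)
 *	vm_page_bits(1024, 512)  -> 0x04		(chunk 2 only)
 *	vm_page_bits(1024, 1024) -> 0x0c		(chunks 2 and 3)
 *	vm_page_bits(3584, 4096) -> 0x80		(clipped to chunk 7)
 *
 * vm_page_set_validclean(m, 1024, 1024) would therefore set bits 2-3 in
 * m->valid and clear them in m->dirty.
 */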

/*
 * This interface is for merging with malloc() someday.
 * Even if we never implement compaction so that contiguous allocation
 * works after initialization time, malloc()'s data structures are good
 * for statistics and for allocations of less than a page.
 */
void *
contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
	unsigned long size;	/* should be size_t here and for malloc() */
	struct malloc_type *type;
	int flags;
	unsigned long low;
	unsigned long high;
	unsigned long alignment;
	unsigned long boundary;
	vm_map_t map;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	int pass;
	vm_page_t pga = vm_page_array;

	size = round_page(size);
#if !defined(MAX_PERF)
	if (size == 0)
		panic("contigmalloc1: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("contigmalloc1: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("contigmalloc1: boundary must be a power of 2");
#endif

	start = 0;
	for (pass = 0; pass <= 1; pass++) {
		s = splvm();
again:
		/*
		 * Find first page in array that is free, within range, aligned, and
		 * such that the boundary won't be crossed.
		 */
		for (i = start; i < cnt.v_page_count; i++) {
			int pqtype;
			phys = VM_PAGE_TO_PHYS(&pga[i]);
			pqtype = pga[i].queue - pga[i].pc;
			if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
			    (phys >= low) && (phys < high) &&
			    ((phys & (alignment - 1)) == 0) &&
			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
				break;
		}

		/*
		 * If the above failed or we will exceed the upper bound, fail.
		 */
		if ((i == cnt.v_page_count) ||
		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
			vm_page_t m, next;

again1:
			for (m = TAILQ_FIRST(&vm_page_queue_inactive);
			    m != NULL;
			    m = next) {

				if (m->queue != PQ_INACTIVE) {
					break;
				}

				next = TAILQ_NEXT(m, pageq);
				if (vm_page_sleep_busy(m, TRUE, "vpctw0"))
					goto again1;
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
						vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
						VOP_UNLOCK(m->object->handle, 0, curproc);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
					    m->object->type == OBJT_DEFAULT) {
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
					vm_page_cache(m);
			}

			for (m = TAILQ_FIRST(&vm_page_queue_active);
			    m != NULL;
			    m = next) {

				if (m->queue != PQ_ACTIVE) {
					break;
				}

				next = TAILQ_NEXT(m, pageq);
				if (vm_page_sleep_busy(m, TRUE, "vpctw1"))
					goto again1;
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
						vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
						VOP_UNLOCK(m->object->handle, 0, curproc);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
					    m->object->type == OBJT_DEFAULT) {
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
					vm_page_cache(m);
			}

			splx(s);
			continue;
		}
		start = i;

		/*
		 * Check successive pages for contiguous and free.
		 */
		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
			int pqtype;
			pqtype = pga[i].queue - pga[i].pc;
			if ((VM_PAGE_TO_PHYS(&pga[i]) !=
			    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
			    ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
				start++;
				goto again;
			}
		}

		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			int pqtype;
			vm_page_t m = &pga[i];

			pqtype = m->queue - m->pc;
			if (pqtype == PQ_CACHE) {
				vm_page_busy(m);
				vm_page_free(m);
			}

			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
			(*vm_page_queues[m->queue].lcnt)--;
			cnt.v_free_count--;
			m->valid = VM_PAGE_BITS_ALL;
			m->flags = 0;
			m->dirty = 0;
			m->wire_count = 0;
			m->busy = 0;
			m->queue = PQ_NONE;
			m->object = NULL;
			vm_page_wire(m);
		}

		/*
		 * We've found a contiguous chunk that meets our requirements.
		 * Allocate kernel VM, unfree and assign the physical pages to it and
		 * return kernel VM pointer.
		 */
		tmp_addr = addr = kmem_alloc_pageable(map, size);
		if (addr == 0) {
			/*
			 * XXX We almost never run out of kernel virtual
			 * space, so we don't make the allocated memory
			 * above available.
			 */
			splx(s);
			return (NULL);
		}

		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			vm_page_t m = &pga[i];
			vm_page_insert(m, kernel_object,
			    OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
			pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
			tmp_addr += PAGE_SIZE;
		}

		splx(s);
		return ((void *)addr);
	}
	return NULL;
}

void *
contigmalloc(size, type, flags, low, high, alignment, boundary)
	unsigned long size;	/* should be size_t here and for malloc() */
	struct malloc_type *type;
	int flags;
	unsigned long low;
	unsigned long high;
	unsigned long alignment;
	unsigned long boundary;
{
	return contigmalloc1(size, type, flags, low, high, alignment, boundary,
	    kernel_map);
}

vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
	vm_offset_t size;
	vm_offset_t low;
	vm_offset_t high;
	vm_offset_t alignment;
{
	return ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
	    alignment, 0ul, kernel_map));
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;
	db_printf("PQ_FREE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_FREE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_CACHE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_CACHE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
	    *vm_page_queues[PQ_ACTIVE].lcnt,
	    *vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */
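
/*
 * The two DB_SHOW_COMMAND() entries above are reached from the in-kernel
 * debugger; assuming a DDB-enabled kernel, "show page" prints the global
 * vmmeter counters and "show pageq" dumps the per-color free and cache
 * queue lengths together with the active and inactive queue lengths.
 */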