vm_page.c revision 40546
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.107 1998/09/04 08:06:57 dfr Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

static void	vm_page_queue_init __P((void));
static vm_page_t vm_page_select_free __P((vm_object_t object,
			vm_pindex_t pindex, int prefqueue));
static vm_page_t vm_page_select_cache __P((vm_object_t, vm_pindex_t));

/*
 *	Associated with page of user-allocatable memory is a
 *	page structure.
 */

static struct pglist *vm_page_buckets;	/* Array of buckets */
static int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */
static volatile int vm_page_bucket_generation;

struct pglist vm_page_queue_free[PQ_L2_SIZE] = {0};
struct pglist vm_page_queue_zero[PQ_L2_SIZE] = {0};
struct pglist vm_page_queue_active = {0};
struct pglist vm_page_queue_inactive = {0};
struct pglist vm_page_queue_cache[PQ_L2_SIZE] = {0};

static int no_queue = 0;

struct vpgqueues vm_page_queues[PQ_COUNT] = {0};
static int pqcnt[PQ_COUNT] = {0};

static void
vm_page_queue_init(void)
{
	int i;

	vm_page_queues[PQ_NONE].pl = NULL;
	vm_page_queues[PQ_NONE].cnt = &no_queue;
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_FREE+i].pl = &vm_page_queue_free[i];
		vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
	}
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_ZERO+i].pl = &vm_page_queue_zero[i];
		vm_page_queues[PQ_ZERO+i].cnt = &cnt.v_free_count;
	}
	vm_page_queues[PQ_INACTIVE].pl = &vm_page_queue_inactive;
	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;

	vm_page_queues[PQ_ACTIVE].pl = &vm_page_queue_active;
	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_CACHE+i].pl = &vm_page_queue_cache[i];
		vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
	}
	for (i = 0; i < PQ_COUNT; i++) {
		if (vm_page_queues[i].pl) {
			TAILQ_INIT(vm_page_queues[i].pl);
		} else if (i != 0) {
			panic("vm_page_queue_init: queue %d is null", i);
		}
		vm_page_queues[i].lcnt = &pqcnt[i];
	}
}

vm_page_t vm_page_array = 0;
static int vm_page_array_size = 0;
long first_page = 0;
static long last_page;
static vm_size_t page_mask;
static int page_shift;
int vm_page_zero_count = 0;

/*
 * map of contiguous valid DEV_BSIZE chunks in a page
 * (this list is valid for page sizes up to 16*DEV_BSIZE)
 */
static u_short vm_page_dev_bsize_chunks[] = {
	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};

static __inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex));
static int vm_page_freechk_and_unqueue __P((vm_page_t m));
static void vm_page_free_wakeup __P((void));
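
/*
 * Note on page coloring (descriptive comment, not from the original
 * source): the free, zero and cache queues are each split into
 * PQ_L2_SIZE sub-queues indexed by page color -- the low bits of the
 * physical page number, see the m->pc computation in vm_page_startup().
 * The allocators below prefer a page whose color matches
 * (pindex + object->pg_color), which is intended to spread an object's
 * pages evenly across a physically-indexed L2 cache.
 */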

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void
vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0;; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t starta;
	vm_offset_t enda;
	register vm_offset_t vaddr;
{
	register vm_offset_t mapped;
	register vm_page_t m;
	register struct pglist *bucket;
	vm_size_t npages, page_range;
	register vm_offset_t new_start;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t first_managed_page;

	/* the biggest memory array is the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */

	vm_page_queue_init();

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct pglist *) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
	new_start = round_page(new_start);
	mapped = round_page(vaddr);
	vaddr = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;
	vaddr = round_page(vaddr);
	bzero((caddr_t) mapped, vaddr - mapped);

	for (i = 0; i < vm_page_bucket_count; i++) {
		TAILQ_INIT(bucket);
		bucket++;
	}

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */

	first_page = phys_avail[0] / PAGE_SIZE;
	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;

	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (start - phys_avail[biggestone])) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */
	new_start = round_page(start + page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->phys_addr = pa;
			m->flags = 0;
			m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
			m->queue = m->pc + PQ_FREE;
			TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
			++(*vm_page_queues[m->queue].lcnt);
			pa += PAGE_SIZE;
		}
	}
	return (mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE: This macro depends on vm_page_bucket_count being a power of 2.
 */
static __inline int
vm_page_hash(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	return ((((uintptr_t) object) >> 5) + (pindex >> 1)) & vm_page_hash_mask;
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object and page must be locked, and must be splhigh.
 */

void
vm_page_insert(m, object, pindex)
	register vm_page_t m;
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register struct pglist *bucket;

#if !defined(MAX_PERF)
	if (m->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");
#endif

	/*
	 * Record the object/offset pair in this page
	 */

	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	TAILQ_INSERT_TAIL(bucket, m, hashq);
	vm_page_bucket_generation++;

	/*
	 * Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, m, listq);
	vm_page_flag_set(m, PG_TABLED);
	m->object->page_hint = m;
	m->object->generation++;

	if (m->wire_count)
		object->wire_count++;

	if ((m->queue - m->pc) == PQ_CACHE)
		object->cache_count++;

	/*
	 * And show that the object has one more resident page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked, and at splhigh.
 */

void
vm_page_remove(m)
	register vm_page_t m;
{
	register struct pglist *bucket;
	vm_object_t object;

	if (!(m->flags & PG_TABLED))
		return;

#if !defined(MAX_PERF)
	if ((m->flags & PG_BUSY) == 0) {
		panic("vm_page_remove: page not busy");
	}
#endif

	vm_page_flag_clear(m, PG_BUSY);
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}

	object = m->object;
	if (object->page_hint == m)
		object->page_hint = NULL;

	if (m->wire_count)
		object->wire_count--;

	if ((m->queue - m->pc) == PQ_CACHE)
		object->cache_count--;

	/*
	 * Remove from the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
	TAILQ_REMOVE(bucket, m, hashq);
	vm_page_bucket_generation++;

	/*
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	object->resident_page_count--;
	object->generation++;
	m->object = NULL;

	vm_page_flag_clear(m, PG_TABLED);
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t
vm_page_lookup(object, pindex)
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register vm_page_t m;
	register struct pglist *bucket;
	int generation;
	int s;

	/*
	 * Search the hash table for this object/offset pair
	 */

	if (object->page_hint && (object->page_hint->pindex == pindex) &&
	    (object->page_hint->object == object))
		return object->page_hint;

retry:
	generation = vm_page_bucket_generation;
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	for (m = TAILQ_FIRST(bucket); m != NULL; m = TAILQ_NEXT(m, hashq)) {
		if ((m->object == object) && (m->pindex == pindex)) {
			if (vm_page_bucket_generation != generation)
				goto retry;
			m->object->page_hint = m;
			return (m);
		}
	}
	if (vm_page_bucket_generation != generation)
		goto retry;
	return (NULL);
}
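
/*
 * Note on the lookup above (descriptive comment, not from the original
 * source): vm_page_lookup() samples vm_page_bucket_generation before
 * walking a bucket and restarts the scan from scratch whenever
 * vm_page_insert() or vm_page_remove() bumped the generation mid-walk,
 * so a result taken from a hash chain that mutated underneath the
 * walker is never trusted.
 */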

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(m, new_object, new_pindex)
	register vm_page_t m;
	register vm_object_t new_object;
	vm_pindex_t new_pindex;
{
	int s;

	s = splvm();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	splx(s);
}

/*
 * vm_page_unqueue without any wakeup
 */
void
vm_page_unqueue_nowakeup(m)
	vm_page_t m;
{
	int queue = m->queue;
	struct vpgqueues *pq;
	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(pq->pl, m, pageq);
		(*pq->cnt)--;
		(*pq->lcnt)--;
		if ((queue - m->pc) == PQ_CACHE) {
			if (m->object)
				m->object->cache_count--;
		}
	}
}

/*
 * vm_page_unqueue must be called at splhigh();
 */
void
vm_page_unqueue(m)
	vm_page_t m;
{
	int queue = m->queue;
	struct vpgqueues *pq;
	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(pq->pl, m, pageq);
		(*pq->cnt)--;
		(*pq->lcnt)--;
		if ((queue - m->pc) == PQ_CACHE) {
			if ((cnt.v_cache_count + cnt.v_free_count) <
			    (cnt.v_free_reserved + cnt.v_cache_min))
				pagedaemon_wakeup();
			if (m->object)
				m->object->cache_count--;
		}
	}
}

/*
 * Find a page on the specified queue with color optimization.
 */
vm_page_t
vm_page_list_find(basequeue, index)
	int basequeue, index;
{
#if PQ_L2_SIZE > 1

	int i, j;
	vm_page_t m;
	int hindex;
	struct vpgqueues *pq;

	pq = &vm_page_queues[basequeue];

	m = TAILQ_FIRST(pq[index].pl);
	if (m)
		return m;

	for (j = 0; j < PQ_L1_SIZE; j++) {
		int ij;
		for (i = (PQ_L2_SIZE / 2) - PQ_L1_SIZE;
		    (ij = i + j) > 0;
		    i -= PQ_L1_SIZE) {

			hindex = index + ij;
			if (hindex >= PQ_L2_SIZE)
				hindex -= PQ_L2_SIZE;
			if (m = TAILQ_FIRST(pq[hindex].pl))
				return m;

			hindex = index - ij;
			if (hindex < 0)
				hindex += PQ_L2_SIZE;
			if (m = TAILQ_FIRST(pq[hindex].pl))
				return m;
		}
	}

	hindex = index + PQ_L2_SIZE / 2;
	if (hindex >= PQ_L2_SIZE)
		hindex -= PQ_L2_SIZE;
	m = TAILQ_FIRST(pq[hindex].pl);
	if (m)
		return m;

	return NULL;
#else
	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
#endif

}

/*
 * Find a page on the specified queue with color optimization.
 */
vm_page_t
vm_page_select(object, pindex, basequeue)
	vm_object_t object;
	vm_pindex_t pindex;
	int basequeue;
{

#if PQ_L2_SIZE > 1
	int index;
	index = (pindex + object->pg_color) & PQ_L2_MASK;
	return vm_page_list_find(basequeue, index);

#else
	return TAILQ_FIRST(vm_page_queues[basequeue].pl);
#endif

}

/*
 * Find a page on the cache queue with color optimization.  As pages
 * might be found, but not applicable, they are deactivated.  This
 * keeps us from using potentially busy cached pages.
 */
vm_page_t
vm_page_select_cache(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	vm_page_t m;

	while (TRUE) {
#if PQ_L2_SIZE > 1
		int index;
		index = (pindex + object->pg_color) & PQ_L2_MASK;
		m = vm_page_list_find(PQ_CACHE, index);

#else
		m = TAILQ_FIRST(vm_page_queues[PQ_CACHE].pl);
#endif
		if (m && ((m->flags & PG_BUSY) || m->busy ||
		    m->hold_count || m->wire_count)) {
			vm_page_deactivate(m);
			continue;
		}
		return m;
	}
}
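
/*
 * Illustrative example of the color index computation above (not from
 * the original source; assumes PQ_L2_SIZE is configured to 8): an object
 * with pg_color 3 asking for pindex 13 computes (13 + 3) & 7 == 0, so
 * the search starts at sub-queue 0 and vm_page_list_find() fans outward
 * to neighboring colors, wrapping modulo PQ_L2_SIZE, until it finds a
 * non-empty queue.
 */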

/*
 * Find a free or zero page, with specified preference.
 */
static vm_page_t
vm_page_select_free(object, pindex, prefqueue)
	vm_object_t object;
	vm_pindex_t pindex;
	int prefqueue;
{
#if PQ_L2_SIZE > 1
	int i, j;
	int index, hindex;
#endif
	vm_page_t m, mh;
	int oqueuediff;
	struct vpgqueues *pq;

	if (prefqueue == PQ_ZERO)
		oqueuediff = PQ_FREE - PQ_ZERO;
	else
		oqueuediff = PQ_ZERO - PQ_FREE;

	if (mh = object->page_hint) {
		if (mh->pindex == (pindex - 1)) {
			if ((mh->flags & PG_FICTITIOUS) == 0) {
				if ((mh < &vm_page_array[cnt.v_page_count-1]) &&
				    (mh >= &vm_page_array[0])) {
					int queue;
					m = mh + 1;
					if (VM_PAGE_TO_PHYS(m) == (VM_PAGE_TO_PHYS(mh) + PAGE_SIZE)) {
						queue = m->queue - m->pc;
						if (queue == PQ_FREE || queue == PQ_ZERO) {
							return m;
						}
					}
				}
			}
		}
	}

	pq = &vm_page_queues[prefqueue];

#if PQ_L2_SIZE > 1

	index = (pindex + object->pg_color) & PQ_L2_MASK;

	if (m = TAILQ_FIRST(pq[index].pl))
		return m;
	if (m = TAILQ_FIRST(pq[index + oqueuediff].pl))
		return m;

	for (j = 0; j < PQ_L1_SIZE; j++) {
		int ij;
		for (i = (PQ_L2_SIZE / 2) - PQ_L1_SIZE;
		    (ij = i + j) >= 0;
		    i -= PQ_L1_SIZE) {

			hindex = index + ij;
			if (hindex >= PQ_L2_SIZE)
				hindex -= PQ_L2_SIZE;
			if (m = TAILQ_FIRST(pq[hindex].pl))
				return m;
			if (m = TAILQ_FIRST(pq[hindex + oqueuediff].pl))
				return m;

			hindex = index - ij;
			if (hindex < 0)
				hindex += PQ_L2_SIZE;
			if (m = TAILQ_FIRST(pq[hindex].pl))
				return m;
			if (m = TAILQ_FIRST(pq[hindex + oqueuediff].pl))
				return m;
		}
	}

	hindex = index + PQ_L2_SIZE / 2;
	if (hindex >= PQ_L2_SIZE)
		hindex -= PQ_L2_SIZE;
	if (m = TAILQ_FIRST(pq[hindex].pl))
		return m;
	if (m = TAILQ_FIRST(pq[hindex+oqueuediff].pl))
		return m;

#else
	if (m = TAILQ_FIRST(pq[0].pl))
		return m;
	else
		return TAILQ_FIRST(pq[oqueuediff].pl);
#endif

	return NULL;
}
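
/*
 * Note on the page_hint check above (descriptive comment, not from the
 * original source): when the object's hinted page sits at pindex - 1 and
 * the vm_page physically following it is on a free or zero queue,
 * vm_page_select_free() returns that neighbor directly, so objects that
 * grow sequentially tend to receive physically contiguous pages.
 */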

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	VM_ALLOC_ZERO		zero page
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, pindex, page_req)
	vm_object_t object;
	vm_pindex_t pindex;
	int page_req;
{
	register vm_page_t m;
	struct vpgqueues *pq;
	vm_object_t oldobject;
	int queue, qtype;
	int s;

#ifdef DIAGNOSTIC
	m = vm_page_lookup(object, pindex);
	if (m)
		panic("vm_page_alloc: page already allocated");
#endif

	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	s = splvm();

	switch (page_req) {

	case VM_ALLOC_NORMAL:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(NORMAL): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select_cache(object, pindex);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				vm_pageout_deficit++;
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_ZERO:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			m = vm_page_select_free(object, pindex, PQ_ZERO);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(ZERO): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select_cache(object, pindex);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(ZERO): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				vm_pageout_deficit++;
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_SYSTEM:
		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
		    ((cnt.v_cache_count == 0) &&
		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(SYSTEM): missing page on free queue\n");
#endif
		} else {
			m = vm_page_select_cache(object, pindex);
			if (m == NULL) {
				splx(s);
#if defined(DIAGNOSTIC)
				if (cnt.v_cache_count > 0)
					printf("vm_page_alloc(SYSTEM): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
				vm_pageout_deficit++;
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_INTERRUPT:
		if (cnt.v_free_count > 0) {
			m = vm_page_select_free(object, pindex, PQ_FREE);
#if defined(DIAGNOSTIC)
			if (m == NULL)
				panic("vm_page_alloc(INTERRUPT): missing page on free queue\n");
#endif
		} else {
			splx(s);
			vm_pageout_deficit++;
			pagedaemon_wakeup();
			return (NULL);
		}
		break;

	default:
		m = NULL;
#if !defined(MAX_PERF)
		panic("vm_page_alloc: invalid allocation class");
#endif
	}
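
	/*
	 * Descriptive note (not from the original source): at this point m
	 * came off a free, zero or cache queue.  A PQ_CACHE page still
	 * belongs to its old object and must be removed from it below,
	 * while a PQ_ZERO page keeps PG_ZERO set so the caller can skip
	 * zeroing it.
	 */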
	queue = m->queue;
	qtype = queue - m->pc;
	if (qtype == PQ_ZERO)
		vm_page_zero_count--;
	pq = &vm_page_queues[queue];
	TAILQ_REMOVE(pq->pl, m, pageq);
	(*pq->cnt)--;
	(*pq->lcnt)--;
	oldobject = NULL;
	if (qtype == PQ_ZERO) {
		m->flags = PG_ZERO | PG_BUSY;
	} else if (qtype == PQ_CACHE) {
		oldobject = m->object;
		vm_page_busy(m);
		vm_page_remove(m);
		m->flags = PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}
	m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;
	m->dirty = 0;
	m->queue = PQ_NONE;

	/* XXX before splx until vm_page_insert is safe */
	vm_page_insert(m, object, pindex);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (((cnt.v_free_count + cnt.v_cache_count) <
	    (cnt.v_free_reserved + cnt.v_cache_min)) ||
	    (cnt.v_free_count < cnt.v_pageout_free_min))
		pagedaemon_wakeup();

	if ((qtype == PQ_CACHE) &&
	    ((page_req == VM_ALLOC_NORMAL) || (page_req == VM_ALLOC_ZERO)) &&
	    oldobject && (oldobject->type == OBJT_VNODE) &&
	    ((oldobject->flags & OBJ_DEAD) == 0)) {
		struct vnode *vp;
		vp = (struct vnode *) oldobject->handle;
		if (vp && VSHOULDFREE(vp)) {
			if ((vp->v_flag & (VFREE|VTBFREE|VDOOMED)) == 0) {
				TAILQ_INSERT_TAIL(&vnode_tobefree_list, vp, v_freelist);
				vp->v_flag |= VTBFREE;
			}
		}
	}
	splx(s);

	return (m);
}

void
vm_wait()
{
	int s;

	s = splvm();
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		tsleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed++;
			wakeup(&vm_pages_needed);
		}
		tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
	}
	splx(s);
}

int
vm_page_sleep(vm_page_t m, char *msg, char *busy)
{
	vm_object_t object = m->object;
	int slept = 0;
	if ((busy && *busy) || (m->flags & PG_BUSY)) {
		int s;
		s = splvm();
		if ((busy && *busy) || (m->flags & PG_BUSY)) {
			vm_page_flag_set(m, PG_WANTED);
			tsleep(m, PVM, msg, 0);
			slept = 1;
		}
		splx(s);
	}
	return slept;
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void
vm_page_activate(m)
	register vm_page_t m;
{
	int s;
	vm_page_t np;
	vm_object_t object;

	s = splvm();
	if (m->queue != PQ_ACTIVE) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;

		vm_page_unqueue(m);

		if (m->wire_count == 0) {
			m->queue = PQ_ACTIVE;
			++(*vm_page_queues[PQ_ACTIVE].lcnt);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			cnt.v_active_count++;
		}
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}

	splx(s);
}

/*
 * helper routine for vm_page_free and vm_page_free_zero
 */
static int
vm_page_freechk_and_unqueue(m)
	vm_page_t m;
{
	vm_object_t oldobject;

	oldobject = m->object;

#if !defined(MAX_PERF)
	if (m->busy || ((m->queue - m->pc) == PQ_FREE) ||
	    (m->hold_count != 0)) {
		printf(
		    "vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
		    (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
		    m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}
#endif

	vm_page_unqueue_nowakeup(m);
	vm_page_remove(m);

	if ((m->flags & PG_FICTITIOUS) != 0) {
		return 0;
	}

	m->valid = 0;

	if (m->wire_count != 0) {
#if !defined(MAX_PERF)
		if (m->wire_count > 1) {
			panic("vm_page_free: invalid wire count (%d), pindex: 0x%x",
			    m->wire_count, m->pindex);
		}
#endif
		printf("vm_page_free: freeing wired page\n");
		m->wire_count = 0;
		if (m->object)
			m->object->wire_count--;
		cnt.v_wire_count--;
	}

	if (oldobject && (oldobject->type == OBJT_VNODE) &&
	    ((oldobject->flags & OBJ_DEAD) == 0)) {
		struct vnode *vp;
		vp = (struct vnode *) oldobject->handle;
		if (vp && VSHOULDFREE(vp)) {
			if ((vp->v_flag & (VTBFREE|VDOOMED|VFREE)) == 0) {
				TAILQ_INSERT_TAIL(&vnode_tobefree_list, vp, v_freelist);
				vp->v_flag |= VTBFREE;
			}
		}
	}

#ifdef __alpha__
	pmap_page_is_free(m);
#endif

	return 1;
}

/*
 * helper routine for vm_page_free and vm_page_free_zero
 */
static __inline void
vm_page_free_wakeup()
{

	/*
	 * if pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * wakeup processes that are waiting on memory if we hit a
	 * high water mark. And wakeup scheduler process if we have
	 * lots of memory. this process will swapin processes.
	 */
	if (vm_pages_needed &&
	    ((cnt.v_free_count + cnt.v_cache_count) >= cnt.v_free_min)) {
		wakeup(&cnt.v_free_count);
		vm_pages_needed = 0;
	}
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free(m)
	register vm_page_t m;
{
	int s;
	struct vpgqueues *pq;

	s = splvm();

	cnt.v_tfree++;

	if (!vm_page_freechk_and_unqueue(m)) {
		splx(s);
		return;
	}

	m->queue = PQ_FREE + m->pc;
	pq = &vm_page_queues[m->queue];
	++(*pq->lcnt);
	++(*pq->cnt);
	/*
	 * If the pageout process is grabbing the page, it is likely
	 * that the page is NOT in the cache.  It is more likely that
	 * the page will be partially in the cache if it is being
	 * explicitly freed.
	 */
	if (curproc == pageproc) {
		TAILQ_INSERT_TAIL(pq->pl, m, pageq);
	} else {
		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	}

	vm_page_free_wakeup();
	splx(s);
}

void
vm_page_free_zero(m)
	register vm_page_t m;
{
	int s;
	struct vpgqueues *pq;

	s = splvm();

	cnt.v_tfree++;

	if (!vm_page_freechk_and_unqueue(m)) {
		splx(s);
		return;
	}

	m->queue = PQ_ZERO + m->pc;
	pq = &vm_page_queues[m->queue];
	++(*pq->lcnt);
	++(*pq->cnt);

	TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	++vm_page_zero_count;
	vm_page_free_wakeup();
	splx(s);
}
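
/*
 * Note on vm_page_free_zero() above (descriptive comment, not from the
 * original source): pages already known to be zero-filled go onto the
 * PQ_ZERO queues and bump vm_page_zero_count, so a later VM_ALLOC_ZERO
 * request can be satisfied without re-zeroing; vm_page_alloc() preserves
 * this by handing such a page out with PG_ZERO still set.
 */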

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void
vm_page_wire(m)
	register vm_page_t m;
{
	int s;

	if (m->wire_count == 0) {
		s = splvm();
		vm_page_unqueue(m);
		splx(s);
		cnt.v_wire_count++;
		if (m->object)
			m->object->wire_count++;
	}
	(*vm_page_queues[PQ_NONE].lcnt)++;
	m->wire_count++;
	vm_page_flag_set(m, PG_MAPPED);
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void
vm_page_unwire(m)
	register vm_page_t m;
{
	int s;

	s = splvm();

	if (m->wire_count > 0) {
		m->wire_count--;
		if (m->wire_count == 0) {
			if (m->object)
				m->object->wire_count--;
			cnt.v_wire_count--;
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			m->queue = PQ_ACTIVE;
			(*vm_page_queues[PQ_ACTIVE].lcnt)++;
			cnt.v_active_count++;
		}
	} else {
#if !defined(MAX_PERF)
		panic("vm_page_unwire: invalid wire count: %d\n", m->wire_count);
#endif
	}
	splx(s);
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t m;
{
	int s;

	/*
	 * Only move active pages -- ignore locked or already inactive ones.
	 *
	 * XXX: sometimes we get pages which aren't wired down or on any queue -
	 * we need to put them on the inactive queue also, otherwise we lose
	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	s = splvm();
	if (m->wire_count == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;
		vm_page_unqueue(m);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->queue = PQ_INACTIVE;
		++(*vm_page_queues[PQ_INACTIVE].lcnt);
		cnt.v_inactive_count++;
	}
	splx(s);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 */
void
vm_page_cache(m)
	register vm_page_t m;
{
	int s;

#if !defined(MAX_PERF)
	if ((m->flags & PG_BUSY) || m->busy || m->wire_count) {
		printf("vm_page_cache: attempting to cache busy page\n");
		return;
	}
#endif
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	vm_page_protect(m, VM_PROT_NONE);
#if !defined(MAX_PERF)
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %d", m->pindex);
	}
#endif
	s = splvm();
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	(*vm_page_queues[m->queue].lcnt)++;
	TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
	cnt.v_cache_count++;
	m->object->cache_count++;
	vm_page_free_wakeup();
	splx(s);
}

/*
 * Grab a page, waiting until we are woken up due to the page
 * changing state.  We keep on waiting if the page continues
 * to be in the object.  If the page doesn't exist, allocate it.
 */
vm_page_t
vm_page_grab(object, pindex, allocflags)
	vm_object_t object;
	vm_pindex_t pindex;
	int allocflags;
{

	vm_page_t m;
	int s, generation;

retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (m->busy || (m->flags & PG_BUSY)) {
			generation = object->generation;

			s = splvm();
			while ((object->generation == generation) &&
			    (m->busy || (m->flags & PG_BUSY))) {
				vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
				tsleep(m, PVM, "pgrbwt", 0);
				if ((allocflags & VM_ALLOC_RETRY) == 0) {
					splx(s);
					return NULL;
				}
			}
			splx(s);
			goto retrylookup;
		} else {
			vm_page_busy(m);
			return m;
		}
	}

	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
	if (m == NULL) {
		VM_WAIT;
		if ((allocflags & VM_ALLOC_RETRY) == 0)
			return NULL;
		goto retrylookup;
	}

	return m;
}

/*
 * mapping function for valid bits or for dirty bits in
 * a page
 */
__inline int
vm_page_bits(int base, int size)
{
	u_short chunk;

	if ((base == 0) && (size >= PAGE_SIZE))
		return VM_PAGE_BITS_ALL;

	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	base &= PAGE_MASK;
	if (size > PAGE_SIZE - base) {
		size = PAGE_SIZE - base;
	}

	base = base / DEV_BSIZE;
	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
	return (chunk << base) & VM_PAGE_BITS_ALL;
}
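
/*
 * Worked example for vm_page_bits() (illustrative, not from the original
 * source; assumes 4096-byte pages and a DEV_BSIZE of 512):
 * vm_page_bits(512, 1024) rounds size up to 1024, computes
 * base = 512 / 512 = 1 and chunk = vm_page_dev_bsize_chunks[2] = 0x3,
 * and returns 0x3 << 1 = 0x06 -- the bits for the second and third
 * DEV_BSIZE chunks of the page.
 */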

/*
 * set a page valid and clean
 */
void
vm_page_set_validclean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE)
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}

/*
 * set a page (partially) invalid
 */
void
vm_page_set_invalid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	m->valid &= ~(bits = vm_page_bits(base, size));
	if (m->valid == 0)
		m->dirty &= ~bits;
	m->object->generation++;
}

/*
 * is (partial) page valid?
 */
int
vm_page_is_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

void
vm_page_test_dirty(m)
	vm_page_t m;
{
	if ((m->dirty != VM_PAGE_BITS_ALL) &&
	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		m->dirty = VM_PAGE_BITS_ALL;
	}
}

/*
 * This interface is for merging with malloc() someday.
 * Even if we never implement compaction so that contiguous allocation
 * works after initialization time, malloc()'s data structures are good
 * for statistics and for allocations of less than a page.
 */
void *
contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
	unsigned long size;	/* should be size_t here and for malloc() */
	struct malloc_type *type;
	int flags;
	unsigned long low;
	unsigned long high;
	unsigned long alignment;
	unsigned long boundary;
	vm_map_t map;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	int pass;
	vm_page_t pga = vm_page_array;

	size = round_page(size);
#if !defined(MAX_PERF)
	if (size == 0)
		panic("contigmalloc1: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("contigmalloc1: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("contigmalloc1: boundary must be a power of 2");
#endif

	start = 0;
	for (pass = 0; pass <= 1; pass++) {
		s = splvm();
again:
		/*
		 * Find first page in array that is free, within range, aligned, and
		 * such that the boundary won't be crossed.
		 */
		for (i = start; i < cnt.v_page_count; i++) {
			int pqtype;
			phys = VM_PAGE_TO_PHYS(&pga[i]);
			pqtype = pga[i].queue - pga[i].pc;
			if (((pqtype == PQ_ZERO) || (pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
			    (phys >= low) && (phys < high) &&
			    ((phys & (alignment - 1)) == 0) &&
			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
				break;
		}

		/*
		 * If the above failed or we will exceed the upper bound, fail.
		 */
		if ((i == cnt.v_page_count) ||
		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
			vm_page_t m, next;

again1:
			for (m = TAILQ_FIRST(&vm_page_queue_inactive);
			    m != NULL;
			    m = next) {

				if (m->queue != PQ_INACTIVE) {
					break;
				}

				next = TAILQ_NEXT(m, pageq);
				if (vm_page_sleep(m, "vpctw0", &m->busy))
					goto again1;
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
						vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
						VOP_UNLOCK(m->object->handle, 0, curproc);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
					    m->object->type == OBJT_DEFAULT) {
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
					vm_page_cache(m);
			}

			for (m = TAILQ_FIRST(&vm_page_queue_active);
			    m != NULL;
			    m = next) {

				if (m->queue != PQ_ACTIVE) {
					break;
				}

				next = TAILQ_NEXT(m, pageq);
				if (vm_page_sleep(m, "vpctw1", &m->busy))
					goto again1;
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
						vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
						VOP_UNLOCK(m->object->handle, 0, curproc);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
					    m->object->type == OBJT_DEFAULT) {
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
					vm_page_cache(m);
			}

			splx(s);
			continue;
		}
		start = i;

		/*
		 * Check successive pages for contiguous and free.
		 */
1619 */ 1620 for (i = start + 1; i < (start + size / PAGE_SIZE); i++) { 1621 int pqtype; 1622 pqtype = pga[i].queue - pga[i].pc; 1623 if ((VM_PAGE_TO_PHYS(&pga[i]) != 1624 (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) || 1625 ((pqtype != PQ_ZERO) && (pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) { 1626 start++; 1627 goto again; 1628 } 1629 } 1630 1631 for (i = start; i < (start + size / PAGE_SIZE); i++) { 1632 int pqtype; 1633 vm_page_t m = &pga[i]; 1634 1635 pqtype = m->queue - m->pc; 1636 if (pqtype == PQ_CACHE) { 1637 vm_page_busy(m); 1638 vm_page_free(m); 1639 } 1640 1641 TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq); 1642 (*vm_page_queues[m->queue].lcnt)--; 1643 cnt.v_free_count--; 1644 m->valid = VM_PAGE_BITS_ALL; 1645 m->flags = 0; 1646 m->dirty = 0; 1647 m->wire_count = 0; 1648 m->busy = 0; 1649 m->queue = PQ_NONE; 1650 m->object = NULL; 1651 vm_page_wire(m); 1652 } 1653 1654 /* 1655 * We've found a contiguous chunk that meets are requirements. 1656 * Allocate kernel VM, unfree and assign the physical pages to it and 1657 * return kernel VM pointer. 1658 */ 1659 tmp_addr = addr = kmem_alloc_pageable(map, size); 1660 if (addr == 0) { 1661 /* 1662 * XXX We almost never run out of kernel virtual 1663 * space, so we don't make the allocated memory 1664 * above available. 1665 */ 1666 splx(s); 1667 return (NULL); 1668 } 1669 1670 for (i = start; i < (start + size / PAGE_SIZE); i++) { 1671 vm_page_t m = &pga[i]; 1672 vm_page_insert(m, kernel_object, 1673 OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS)); 1674 pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m)); 1675 tmp_addr += PAGE_SIZE; 1676 } 1677 1678 splx(s); 1679 return ((void *)addr); 1680 } 1681 return NULL; 1682} 1683 1684void * 1685contigmalloc(size, type, flags, low, high, alignment, boundary) 1686 unsigned long size; /* should be size_t here and for malloc() */ 1687 struct malloc_type *type; 1688 int flags; 1689 unsigned long low; 1690 unsigned long high; 1691 unsigned long alignment; 1692 unsigned long boundary; 1693{ 1694 return contigmalloc1(size, type, flags, low, high, alignment, boundary, 1695 kernel_map); 1696} 1697 1698vm_offset_t 1699vm_page_alloc_contig(size, low, high, alignment) 1700 vm_offset_t size; 1701 vm_offset_t low; 1702 vm_offset_t high; 1703 vm_offset_t alignment; 1704{ 1705 return ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high, 1706 alignment, 0ul, kernel_map)); 1707} 1708 1709#include "opt_ddb.h" 1710#ifdef DDB 1711#include <sys/kernel.h> 1712 1713#include <ddb/ddb.h> 1714 1715DB_SHOW_COMMAND(page, vm_page_print_page_info) 1716{ 1717 db_printf("cnt.v_free_count: %d\n", cnt.v_free_count); 1718 db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count); 1719 db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count); 1720 db_printf("cnt.v_active_count: %d\n", cnt.v_active_count); 1721 db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count); 1722 db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved); 1723 db_printf("cnt.v_free_min: %d\n", cnt.v_free_min); 1724 db_printf("cnt.v_free_target: %d\n", cnt.v_free_target); 1725 db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min); 1726 db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target); 1727} 1728 1729DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info) 1730{ 1731 int i; 1732 db_printf("PQ_FREE:"); 1733 for(i=0;i<PQ_L2_SIZE;i++) { 1734 db_printf(" %d", *vm_page_queues[PQ_FREE + i].lcnt); 1735 } 1736 db_printf("\n"); 1737 1738 db_printf("PQ_CACHE:"); 1739 for(i=0;i<PQ_L2_SIZE;i++) { 1740 db_printf(" %d", *vm_page_queues[PQ_CACHE + i].lcnt); 1741 
	}
	db_printf("\n");

	db_printf("PQ_ZERO:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", *vm_page_queues[PQ_ZERO + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
	    *vm_page_queues[PQ_ACTIVE].lcnt,
	    *vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */