vm_object.c revision 7204
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_object.c,v 1.34 1995/03/20 10:20:41 davidg Exp $
 */

/*
 *	Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_kern.h>

static void _vm_object_allocate(vm_size_t, vm_object_t);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;

int vm_object_cache_max;

#define	VM_OBJECT_HASH_COUNT	509

struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT];

long object_collapses = 0;
long object_bypasses = 0;

static void
_vm_object_allocate(size, object)
	vm_size_t size;
	register vm_object_t object;
{
	TAILQ_INIT(&object->memq);
	TAILQ_INIT(&object->reverse_shadow_head);

	object->size = size;
	object->ref_count = 1;
	vm_object_lock_init(object);
	object->flags = OBJ_INTERNAL;	/* pager will reset */
	object->paging_in_progress = 0;
	object->resident_page_count = 0;

	object->pager = NULL;
	object->paging_offset = 0;
	object->shadow = NULL;
	object->shadow_offset = (vm_offset_t) 0;
	object->copy = NULL;

	object->last_read = 0;

	simple_lock(&vm_object_list_lock);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	cnt.v_nzfod += atop(size);
	simple_unlock(&vm_object_list_lock);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(vm_offset_t nothing)
{
	register int i;

	TAILQ_INIT(&vm_object_cached_list);
	TAILQ_INIT(&vm_object_list);
	vm_object_count = 0;
	simple_lock_init(&vm_cache_lock);
	simple_lock_init(&vm_object_list_lock);

	vm_object_cache_max = 84;
	if (cnt.v_page_count > 1000)
		vm_object_cache_max += (cnt.v_page_count - 1000) / 4;

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		TAILQ_INIT(&vm_object_hashtable[i]);

	kernel_object = &kernel_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    kmem_object);
}

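/*
 * Sketch of the object life cycle implied by the module comment above.
 * This sequence is illustrative only and is not taken from any
 * particular caller:
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(size);		(ref_count == 1)
 *	vm_object_reference(obj);		(ref_count == 2)
 *	...
 *	vm_object_deallocate(obj);		(ref_count == 1)
 *	vm_object_deallocate(obj);		(last reference: the object
 *						 is cached if OBJ_CANPERSIST,
 *						 otherwise terminated)
 */
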
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(size)
	vm_size_t size;
{
	register vm_object_t result;

	result = (vm_object_t)
	    malloc((u_long) sizeof *result, M_VMOBJ, M_WAITOK);

	_vm_object_allocate(size, result);

	return (result);
}

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
inline void
vm_object_reference(object)
	register vm_object_t object;
{
	if (object == NULL)
		return;

	vm_object_lock(object);
	object->ref_count++;
	vm_object_unlock(object);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	vm_object_t object;
{
	vm_object_t temp;

	while (object != NULL) {

		if (object->ref_count == 0)
			panic("vm_object_deallocate: object deallocated too many times");

		/*
		 * The cache holds a reference (uncounted) to the object; we
		 * must lock it before removing the object.
		 */

		vm_object_cache_lock();

		/*
		 * Lose the reference
		 */
		vm_object_lock(object);

		object->ref_count--;

		if (object->ref_count != 0) {
			if ((object->ref_count == 1) &&
			    (object->flags & OBJ_INTERNAL)) {
				vm_object_t robject;

				robject = object->reverse_shadow_head.tqh_first;
				if ((robject != NULL) &&
				    (robject->flags & OBJ_INTERNAL)) {
					int s;

					robject->ref_count += 2;
					object->ref_count += 2;

					do {
						s = splhigh();
						while (robject->paging_in_progress) {
							robject->flags |= OBJ_PIPWNT;
							tsleep(robject, PVM, "objde1", 0);
						}

						while (object->paging_in_progress) {
							object->flags |= OBJ_PIPWNT;
							tsleep(object, PVM, "objde2", 0);
						}
						splx(s);

					} while (object->paging_in_progress || robject->paging_in_progress);

					object->ref_count -= 2;
					robject->ref_count -= 2;
					if (robject->ref_count == 0) {
						vm_object_unlock(object);
						vm_object_cache_unlock();
						robject->ref_count += 1;
						object = robject;
						continue;
					}
					vm_object_cache_unlock();
					vm_object_unlock(object);
					vm_object_lock(robject);
					vm_object_collapse(robject);
					return;
				}
			}
			vm_object_unlock(object);
			/*
			 * If there are still references, then we are done.
			 */
			vm_object_cache_unlock();
			return;
		}

		/*
		 * See if this object can persist and has some resident
		 * pages.  If so, enter it in the cache.
		 */
		if ((object->flags & OBJ_CANPERSIST) &&
		    (object->resident_page_count != 0)) {
			vm_pager_t pager = object->pager;
			vn_pager_t vnp = (vn_pager_t) pager->pg_data;

			if (pager->pg_type == PG_VNODE) {
				vnp->vnp_vp->v_flag &= ~VTEXT;
			}

			TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
			    cached_list);
			vm_object_cached++;
			vm_object_cache_unlock();

			vm_object_unlock(object);

			vm_object_cache_trim();
			return;
		}

		/*
		 * Make sure no one can look us up now.
		 */
		object->flags |= OBJ_DEAD;
		vm_object_remove(object->pager);
		vm_object_cache_unlock();

		temp = object->shadow;
		if (temp)
			TAILQ_REMOVE(&temp->reverse_shadow_head, object, reverse_shadow_list);
		vm_object_terminate(object);
		/* unlocks and deallocates object */
		object = temp;
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 */
void
vm_object_terminate(object)
	register vm_object_t object;
{
	register vm_page_t p, next;
	vm_object_t shadow_object;
	int s;
	struct vnode *vp = NULL;

	/*
	 * Detach the object from its shadow if we are the shadow's copy.
	 */
	if ((shadow_object = object->shadow) != NULL) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object)
			shadow_object->copy = NULL;
#if 0
		else if (shadow_object->copy != NULL)
			panic("vm_object_terminate: copy/shadow inconsistency");
#endif
		vm_object_unlock(shadow_object);
	}
	if (object->pager && (object->pager->pg_type == PG_VNODE)) {
		vn_pager_t vnp = object->pager->pg_data;

		vp = vnp->vnp_vp;
		VOP_LOCK(vp);
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	}
	/*
	 * Wait until the pageout daemon is through with the object.
	 */

	s = splhigh();
	while (object->paging_in_progress) {
		vm_object_unlock(object);
		object->flags |= OBJ_PIPWNT;
		tsleep((caddr_t) object, PVM, "objtrm", 0);
		vm_object_lock(object);
	}
	splx(s);

	/*
	 * While the paging system is locked, pull the object's pages off the
	 * active and inactive queues.  This keeps the pageout daemon from
	 * playing with them during vm_pager_deallocate.
	 *
	 * We can't free the pages yet, because the object's pager may have to
	 * write them out before deallocating the paging space.
	 */

	for (p = object->memq.tqh_first; p; p = next) {
		VM_PAGE_CHECK(p);
		next = p->listq.tqe_next;

		vm_page_lock_queues();
		if (p->flags & PG_CACHE)
			vm_page_free(p);
		else {
			s = splhigh();
			vm_page_unqueue(p);
			splx(s);
		}
		vm_page_unlock_queues();
	}

	if (object->paging_in_progress != 0)
		panic("vm_object_terminate: pageout in progress");

	/*
	 * Clean and free the pages, as appropriate.  All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (vp != NULL) {
		VOP_UNLOCK(vp);
		(void) vm_object_page_clean(object, 0, 0, TRUE, TRUE);
		VOP_LOCK(vp);
		vinvalbuf(vp, 0, NOCRED, NULL, 0, 0);
		VOP_UNLOCK(vp);
	}

	/*
	 * Now free the pages.  For internal objects, this also removes them
	 * from paging queues.
	 */
	while ((p = object->memq.tqh_first) != NULL) {
		VM_PAGE_CHECK(p);
		vm_page_lock_queues();
		if (p->flags & PG_BUSY)
			printf("vm_object_terminate: freeing busy page\n");
		PAGE_WAKEUP(p);
		vm_page_free(p);
		cnt.v_pfree++;
		vm_page_unlock_queues();
	}
	vm_object_unlock(object);

	/*
	 * Let the pager know object is dead.
	 */
	if (object->pager != NULL)
		vm_pager_deallocate(object->pager);

	simple_lock(&vm_object_list_lock);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	free((caddr_t) object, M_VMOBJ);
}

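/*
 * A note on the paging_in_progress protocol used above and throughout
 * this file: a waiter sets OBJ_PIPWNT and sleeps on the object; the
 * side that finishes its paging activity drops the count and issues
 * the wakeup (which is what the vm_object_pip_wakeup() calls elsewhere
 * in this file appear to do).  The waiter side, as used by
 * vm_object_terminate, restated for clarity:
 *
 *	s = splhigh();
 *	while (object->paging_in_progress) {
 *		object->flags |= OBJ_PIPWNT;
 *		tsleep((caddr_t) object, PVM, "objtrm", 0);
 *	}
 *	splx(s);
 */
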
/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
boolean_t
vm_object_page_clean(object, start, end, syncio, de_queue)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
	boolean_t syncio;
	boolean_t de_queue;
{
	register vm_page_t p, nextp;
	int size;

	if (object->pager == NULL)
		return 1;

	if (start != end) {
		start = trunc_page(start);
		end = round_page(end);
	}
	size = end - start;

again:
	/*
	 * Wait until the pageout daemon is through with the object.
	 */
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		tsleep(object, PVM, "objpcw", 0);
	}

	nextp = object->memq.tqh_first;
	while ((p = nextp) && ((start == end) || (size != 0))) {
		nextp = p->listq.tqe_next;
		if (start == end || (p->offset >= start && p->offset < end)) {
			if ((p->flags & PG_BUSY) || p->busy) {
				int s = splhigh();

				p->flags |= PG_WANTED;
				tsleep(p, PVM, "objpcn", 0);
				splx(s);
				goto again;
			}
			size -= PAGE_SIZE;

			vm_page_test_dirty(p);

			if ((p->dirty & p->valid) != 0) {
				vm_pageout_clean(p, VM_PAGEOUT_FORCE);
				goto again;
			}
		}
	}
	wakeup((caddr_t) object);
	return 1;
}

/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
void
vm_object_deactivate_pages(object)
	register vm_object_t object;
{
	register vm_page_t p, next;

	for (p = object->memq.tqh_first; p != NULL; p = next) {
		next = p->listq.tqe_next;
		vm_page_lock_queues();
		vm_page_deactivate(p);
		vm_page_unlock_queues();
	}
}

/*
 *	Trim the object cache to size.
 */
void
vm_object_cache_trim()
{
	register vm_object_t object;

	vm_object_cache_lock();
	while (vm_object_cached > vm_object_cache_max) {
		object = vm_object_cached_list.tqh_first;
		vm_object_cache_unlock();

		if (object != vm_object_lookup(object->pager))
			panic("vm_object_cache_trim: I'm sooo confused.");

		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}

/*
 *	vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_copy(object, start, end)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
{
	register vm_page_t p;

	if (object == NULL)
		return;

	vm_object_lock(object);
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		if ((start <= p->offset) && (p->offset < end)) {
			vm_page_protect(p, VM_PROT_READ);
			p->flags |= PG_COPYONWRITE;
		}
	}
	vm_object_unlock(object);
}

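/*
 * The per-page copy-on-write recipe used by vm_object_pmap_copy()
 * above, restated for clarity (this is exactly what its loop does):
 *
 *	vm_page_protect(p, VM_PROT_READ);	write mappings revoked
 *	p->flags |= PG_COPYONWRITE;		the next write fault must
 *						copy the page rather than
 *						share it
 */
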
/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_remove(object, start, end)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
{
	register vm_page_t p;
	int s;

	if (object == NULL)
		return;
	++object->paging_in_progress;

	vm_object_lock(object);
again:
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		if ((start <= p->offset) && (p->offset < end)) {
			s = splhigh();
			if ((p->flags & PG_BUSY) || p->busy) {
				p->flags |= PG_WANTED;
				tsleep((caddr_t) p, PVM, "vmopmr", 0);
				splx(s);
				goto again;
			}
			splx(s);
			vm_page_protect(p, VM_PROT_NONE);
		}
	}
	vm_object_unlock(object);
	vm_object_pip_wakeup(object);
}

/*
 *	vm_object_copy:
 *
 *	Create a new object which is a copy of an existing
 *	object, and mark all of the pages in the existing
 *	object 'copy-on-write'.  The new object has one reference.
 *	Returns the new object.
 *
 *	May defer the copy until later if the object is not backed
 *	up by a non-default pager.
 */
void
vm_object_copy(src_object, src_offset, size,
    dst_object, dst_offset, src_needs_copy)
	register vm_object_t src_object;
	vm_offset_t src_offset;
	vm_size_t size;
	vm_object_t *dst_object;	/* OUT */
	vm_offset_t *dst_offset;	/* OUT */
	boolean_t *src_needs_copy;	/* OUT */
{
	register vm_object_t new_copy;
	register vm_object_t old_copy;
	vm_offset_t new_start, new_end;

	register vm_page_t p;

	if (src_object == NULL) {
		/*
		 * Nothing to copy
		 */
		*dst_object = NULL;
		*dst_offset = 0;
		*src_needs_copy = FALSE;
		return;
	}
	/*
	 * If the object's pager is null_pager or the default pager, we don't
	 * have to make a copy of it.  Instead, we set the needs copy flag and
	 * make a shadow later.
	 */

	vm_object_lock(src_object);

	/*
	 * Try to collapse the object before copying it.
	 */

	vm_object_collapse(src_object);

	if (src_object->pager == NULL ||
	    (src_object->flags & OBJ_INTERNAL)) {

		/*
		 * Make another reference to the object
		 */
		src_object->ref_count++;

		/*
		 * Mark all of the pages copy-on-write.
		 */
		for (p = src_object->memq.tqh_first; p; p = p->listq.tqe_next)
			if (src_offset <= p->offset &&
			    p->offset < src_offset + size)
				p->flags |= PG_COPYONWRITE;
		vm_object_unlock(src_object);

		*dst_object = src_object;
		*dst_offset = src_offset;

		/*
		 * Must make a shadow when write is desired
		 */
		*src_needs_copy = TRUE;
		return;
	}
	/*
	 * If the object has a pager, the pager wants to see all of the
	 * changes.  We need a copy-object for the changed pages.
	 *
	 * If there is a copy-object, and it is empty, no changes have been
	 * made to the object since the copy-object was made.  We can use the
	 * same copy-object.
	 */

Retry1:
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 * Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);

			/* should spin a bit here... */
			tsleep((caddr_t) old_copy, PVM, "cpylck", 1);
			vm_object_lock(src_object);
			goto Retry1;
		}
		if (old_copy->resident_page_count == 0 &&
		    old_copy->pager == NULL) {
			/*
			 * Return another reference to the existing
			 * copy-object.
			 */
			old_copy->ref_count++;
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			*dst_object = old_copy;
			*dst_offset = src_offset;
			*src_needs_copy = FALSE;
			return;
		}
		vm_object_unlock(old_copy);
	}
	vm_object_unlock(src_object);

	/*
	 * If the object has a pager, the pager wants to see all of the
	 * changes.  We must make a copy-object and put the changed pages
	 * there.
	 *
	 * The copy-object is always made large enough to completely shadow
	 * the original object, since it may have several users who want to
	 * shadow the original object at different points.
	 */

	new_copy = vm_object_allocate(src_object->size);

Retry2:
	vm_object_lock(src_object);
	/*
	 * Copy object may have changed while we were unlocked
	 */
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 * Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);
			tsleep((caddr_t) old_copy, PVM, "cpylck", 1);
			goto Retry2;
		}
		/*
		 * Consistency check
		 */
		if (old_copy->shadow != src_object ||
		    old_copy->shadow_offset != (vm_offset_t) 0)
			panic("vm_object_copy: copy/shadow inconsistency");

		/*
		 * Make the old copy-object shadow the new one.  It will
		 * receive no more pages from the original object.
		 */

		src_object->ref_count--;	/* remove ref. from old_copy */
		if (old_copy->shadow)
			TAILQ_REMOVE(&old_copy->shadow->reverse_shadow_head, old_copy, reverse_shadow_list);
		old_copy->shadow = new_copy;
		TAILQ_INSERT_TAIL(&old_copy->shadow->reverse_shadow_head, old_copy, reverse_shadow_list);
		new_copy->ref_count++;	/* locking not needed - we have the
					 * only pointer */
		vm_object_unlock(old_copy);	/* done with old_copy */
	}
	new_start = (vm_offset_t) 0;	/* always shadow original at 0 */
	new_end = (vm_offset_t) new_copy->size;	/* for the whole object */

	/*
	 * Point the new copy at the existing object.
	 */

	new_copy->shadow = src_object;
	TAILQ_INSERT_TAIL(&new_copy->shadow->reverse_shadow_head, new_copy, reverse_shadow_list);
	new_copy->shadow_offset = new_start;
	src_object->ref_count++;
	src_object->copy = new_copy;

	/*
	 * Mark all the affected pages of the existing object copy-on-write.
	 */
	for (p = src_object->memq.tqh_first; p != NULL; p = p->listq.tqe_next)
		if ((new_start <= p->offset) && (p->offset < new_end))
			p->flags |= PG_COPYONWRITE;

	vm_object_unlock(src_object);

	*dst_object = new_copy;
	*dst_offset = src_offset - new_start;
	*src_needs_copy = FALSE;
}

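/*
 * How vm_object_copy() and vm_object_shadow() below fit together: when
 * the copy is deferred (*src_needs_copy comes back TRUE), the caller
 * is expected to shadow the object before permitting a write.  A
 * sketch of that caller-side pattern; illustrative only, the real
 * logic lives in the vm_map layer:
 *
 *	vm_object_copy(src_object, src_offset, size,
 *	    &dst_object, &dst_offset, &src_needs_copy);
 *	if (src_needs_copy)
 *		vm_object_shadow(&dst_object, &dst_offset, size);
 */
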
/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void
vm_object_shadow(object, offset, length)
	vm_object_t *object;	/* IN/OUT */
	vm_offset_t *offset;	/* IN/OUT */
	vm_size_t length;
{
	register vm_object_t source;
	register vm_object_t result;

	source = *object;

	/*
	 * Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 */
	result->shadow = source;
	if (source)
		TAILQ_INSERT_TAIL(&result->shadow->reverse_shadow_head, result, reverse_shadow_list);

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */

	result->shadow_offset = *offset;

	/*
	 * Return the new things
	 */

	*offset = 0;
	*object = result;
}

/*
 *	vm_object_hash hashes the pager/id pair.
 */

#define	vm_object_hash(pager) \
	(((unsigned)pager >> 5) % VM_OBJECT_HASH_COUNT)

/*
 *	vm_object_lookup looks in the object cache for an object with the
 *	specified pager and paging id.
 */

vm_object_t
vm_object_lookup(pager)
	vm_pager_t pager;
{
	register vm_object_hash_entry_t entry;
	vm_object_t object;

	cnt.v_lookups++;
	vm_object_cache_lock();

	for (entry = vm_object_hashtable[vm_object_hash(pager)].tqh_first;
	    entry != NULL;
	    entry = entry->hash_links.tqe_next) {
		object = entry->object;
		if (object->pager == pager) {
			vm_object_lock(object);
			if (object->ref_count == 0) {
				TAILQ_REMOVE(&vm_object_cached_list, object,
				    cached_list);
				vm_object_cached--;
			}
			object->ref_count++;
			vm_object_unlock(object);
			vm_object_cache_unlock();
			cnt.v_hits++;
			return (object);
		}
	}

	vm_object_cache_unlock();
	return (NULL);
}

/*
 *	vm_object_enter enters the specified object/pager/id into
 *	the hash table.
 */

void
vm_object_enter(object, pager)
	vm_object_t object;
	vm_pager_t pager;
{
	struct vm_object_hash_head *bucket;
	register vm_object_hash_entry_t entry;

	/*
	 * We don't cache null objects, and we can't cache objects with the
	 * null pager.
	 */

	if (object == NULL)
		return;
	if (pager == NULL)
		return;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];
	entry = (vm_object_hash_entry_t)
	    malloc((u_long) sizeof *entry, M_VMOBJHASH, M_WAITOK);
	entry->object = object;

	vm_object_cache_lock();
	TAILQ_INSERT_TAIL(bucket, entry, hash_links);
	vm_object_cache_unlock();
}

/*
 *	vm_object_remove:
 *
 *	Remove the pager from the hash table.
 *	Note:  This assumes that the object cache
 *	is locked.  XXX this should be fixed
 *	by reorganizing vm_object_deallocate.
 */
void
vm_object_remove(pager)
	register vm_pager_t pager;
{
	struct vm_object_hash_head *bucket;
	register vm_object_hash_entry_t entry;
	register vm_object_t object;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	for (entry = bucket->tqh_first;
	    entry != NULL;
	    entry = entry->hash_links.tqe_next) {
		object = entry->object;
		if (object->pager == pager) {
			TAILQ_REMOVE(bucket, entry, hash_links);
			free((caddr_t) entry, M_VMOBJHASH);
			break;
		}
	}
}

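/*
 * The hash above is what gives pager-backed objects a persistent
 * identity across mappings.  A sketch of the pager-side convention;
 * illustrative only, the real code is in the pager allocation paths:
 *
 *	object = vm_object_lookup(pager);
 *	if (object == NULL) {
 *		object = vm_object_allocate(size);
 *		object->pager = pager;
 *		vm_object_enter(object, pager);
 *	}
 */
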
/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(object)
	register vm_object_t object;
{
	register vm_object_t backing_object;
	register vm_offset_t backing_offset, new_offset;
	register vm_page_t p, pp;
	register vm_size_t size;

	backing_object = object->shadow;
	if (backing_object->shadow != NULL &&
	    backing_object->shadow->copy == backing_object)
		return;
	if (backing_object->ref_count != 1)
		return;

	backing_object->ref_count += 2;

	backing_offset = object->shadow_offset;
	size = object->size;
	p = backing_object->memq.tqh_first;
	while (p) {
		vm_page_t next;

		next = p->listq.tqe_next;
		if ((p->flags & (PG_BUSY | PG_FICTITIOUS | PG_CACHE)) ||
		    !p->valid || p->hold_count || p->wire_count || p->busy || p->bmapped) {
			p = next;
			continue;
		}
		vm_page_protect(p, VM_PROT_NONE);
		new_offset = (p->offset - backing_offset);
		if (p->offset < backing_offset ||
		    new_offset >= size) {
			if (backing_object->pager)
				swap_pager_freespace(backing_object->pager,
				    backing_object->paging_offset + p->offset, PAGE_SIZE);
			vm_page_lock_queues();
			vm_page_free(p);
			vm_page_unlock_queues();
		} else {
			pp = vm_page_lookup(object, new_offset);
			if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
			    object->paging_offset + new_offset))) {
				if (backing_object->pager)
					swap_pager_freespace(backing_object->pager,
					    backing_object->paging_offset + p->offset, PAGE_SIZE);
				vm_page_lock_queues();
				vm_page_free(p);
				vm_page_unlock_queues();
			} else {
				if (backing_object->pager)
					swap_pager_freespace(backing_object->pager,
					    backing_object->paging_offset + p->offset, PAGE_SIZE);
				vm_page_rename(p, object, new_offset);
				p->dirty = VM_PAGE_BITS_ALL;
			}
		}
		p = next;
	}
	backing_object->ref_count -= 2;
}

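/*
 * Shadow-chain pictures for the two cases handled by
 * vm_object_collapse() below (offsets omitted):
 *
 *	full collapse (backing_object->ref_count == 1):
 *		object -> backing_object -> X    becomes    object -> X
 *	with backing_object's pages and pager absorbed into object;
 *
 *	bypass (every needed page already shadowed by the parent):
 *		object -> backing_object -> X    becomes    object -> X
 *	with backing_object left intact but dropped from this chain.
 */
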
boolean_t vm_object_collapse_allowed = TRUE;

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 *
 *	Requires that the object be locked and the page
 *	queues be unlocked.
 *
 *	This routine has significant changes by John S. Dyson
 *	to fix some swap memory leaks.  18 Dec 93
 *
 */
void
vm_object_collapse(object)
	register vm_object_t object;
{
	register vm_object_t backing_object;
	register vm_offset_t backing_offset;
	register vm_size_t size;
	register vm_offset_t new_offset;
	register vm_page_t p, pp;

	if (!vm_object_collapse_allowed)
		return;

	while (TRUE) {
		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and no pages in it are currently being
		 * paged out.
		 */
		if (object == NULL)
			return;

		/*
		 * Make sure there is a backing object.
		 */
		if ((backing_object = object->shadow) == NULL)
			return;

		/*
		 * we check the backing object first, because it is most likely
		 * !OBJ_INTERNAL.
		 */
		if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
		    (backing_object->flags & OBJ_DEAD) ||
		    (object->flags & OBJ_INTERNAL) == 0 ||
		    (object->flags & OBJ_DEAD))
			return;

		if (object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0) {
			if (vm_object_lock_try(backing_object)) {
				vm_object_qcollapse(object);
				vm_object_unlock(backing_object);
			}
			return;
		}

		vm_object_lock(backing_object);

		/*
		 * The backing object can't be a copy-object: the
		 * shadow_offset for the copy-object must stay as 0.
		 * Furthermore (for the 'we have all the pages' case), if we
		 * bypass backing_object and just shadow the next object in
		 * the chain, old pages from that object would then have to be
		 * copied BOTH into the (former) backing_object and into the
		 * parent object.
		 */
		if (backing_object->shadow != NULL &&
		    backing_object->shadow->copy == backing_object) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) remove
		 * the parent's reference to it.
		 */

		backing_offset = object->shadow_offset;
		size = object->size;

		/*
		 * If there is exactly one reference to the backing object, we
		 * can collapse it into the parent.
		 */

		if (backing_object->ref_count == 1) {

			backing_object->flags |= OBJ_DEAD;
			/*
			 * We can collapse the backing object.
			 *
			 * Move all in-memory pages from backing_object to the
			 * parent.  Pages that have been paged out will be
			 * overwritten by any of the parent's pages that
			 * shadow them.
			 */

			while ((p = backing_object->memq.tqh_first) != 0) {

				new_offset = (p->offset - backing_offset);

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, dispose of
				 * it.
				 *
				 * Otherwise, move it as planned.
				 */

				if (p->offset < backing_offset ||
				    new_offset >= size) {
					vm_page_lock_queues();
					vm_page_protect(p, VM_PROT_NONE);
					PAGE_WAKEUP(p);
					vm_page_free(p);
					vm_page_unlock_queues();
				} else {
					pp = vm_page_lookup(object, new_offset);
					if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
					    object->paging_offset + new_offset))) {
						vm_page_lock_queues();
						vm_page_protect(p, VM_PROT_NONE);
						PAGE_WAKEUP(p);
						vm_page_free(p);
						vm_page_unlock_queues();
					} else {
						vm_page_rename(p, object, new_offset);
					}
				}
			}

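			/*
			 * The pager hand-off below has two cases: if both
			 * objects are paged out, swap_pager_copy() merges
			 * backing_object's swap blocks into object's pager,
			 * with pages the parent already owns taking
			 * precedence; if only backing_object has a pager,
			 * object simply steals it and then frees the swap
			 * blocks below the new paging_offset, since nothing
			 * can reference them any more.
			 */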
			/*
			 * Move the pager from backing_object to object.
			 */

			if (backing_object->pager) {
				backing_object->paging_in_progress++;
				if (object->pager) {
					vm_pager_t bopager;

					object->paging_in_progress++;
					/*
					 * copy shadow object pages into ours
					 * and destroy unneeded pages in
					 * shadow object.
					 */
					bopager = backing_object->pager;
					backing_object->pager = NULL;
					swap_pager_copy(
					    bopager, backing_object->paging_offset,
					    object->pager, object->paging_offset,
					    object->shadow_offset);
					vm_object_pip_wakeup(object);
				} else {
					object->paging_in_progress++;
					/*
					 * grab the shadow objects pager
					 */
					object->pager = backing_object->pager;
					object->paging_offset = backing_object->paging_offset + backing_offset;
					backing_object->pager = NULL;
					/*
					 * free unnecessary blocks
					 */
					swap_pager_freespace(object->pager, 0, object->paging_offset);
					vm_object_pip_wakeup(object);
				}

				vm_object_pip_wakeup(backing_object);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to backing_object->shadow
			 * moves from within backing_object to within object.
			 */

			TAILQ_REMOVE(&object->shadow->reverse_shadow_head, object,
			    reverse_shadow_list);
			if (backing_object->shadow)
				TAILQ_REMOVE(&backing_object->shadow->reverse_shadow_head,
				    backing_object, reverse_shadow_list);
			object->shadow = backing_object->shadow;
			if (object->shadow)
				TAILQ_INSERT_TAIL(&object->shadow->reverse_shadow_head,
				    object, reverse_shadow_list);

			object->shadow_offset += backing_object->shadow_offset;
			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */

			vm_object_unlock(backing_object);

			simple_lock(&vm_object_list_lock);
			TAILQ_REMOVE(&vm_object_list, backing_object,
			    object_list);
			vm_object_count--;
			simple_unlock(&vm_object_list_lock);

			free((caddr_t) backing_object, M_VMOBJ);

			object_collapses++;
		} else {
			/*
			 * If all of the pages in the backing object are
			 * shadowed by the parent object, the parent object no
			 * longer has to shadow the backing object; it can
			 * shadow the next one in the chain.
			 *
			 * The backing object must not be paged out - we'd have
			 * to check all of the paged-out pages, as well.
			 */

			if (backing_object->pager != NULL) {
				vm_object_unlock(backing_object);
				return;
			}
			/*
			 * Should have a check for a 'small' number of pages
			 * here.
			 */

			for (p = backing_object->memq.tqh_first; p; p = p->listq.tqe_next) {
				new_offset = (p->offset - backing_offset);

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, keep going.
				 *
				 * Otherwise, the backing_object must be left in
				 * the chain.
				 */

				if (p->offset >= backing_offset &&
				    new_offset <= size &&
				    ((pp = vm_page_lookup(object, new_offset)) == NULL ||
				    !pp->valid) &&
				    (!object->pager || !vm_pager_has_page(object->pager, object->paging_offset + new_offset))) {
					/*
					 * Page still needed.  Can't go any
					 * further.
					 */
					vm_object_unlock(backing_object);
					return;
				}
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */

			TAILQ_REMOVE(&object->shadow->reverse_shadow_head,
			    object, reverse_shadow_list);
			vm_object_reference(object->shadow = backing_object->shadow);
			if (object->shadow)
				TAILQ_INSERT_TAIL(&object->shadow->reverse_shadow_head,
				    object, reverse_shadow_list);
			object->shadow_offset += backing_object->shadow_offset;

			/*
			 * Backing object might have had a copy pointer to us.
			 * If it did, clear it.
			 */
			if (backing_object->copy == object) {
				backing_object->copy = NULL;
			}
			/*
			 * Drop the reference count on backing_object.  Since
			 * its ref_count was at least 2, it will not vanish;
			 * so we don't need to call vm_object_deallocate.
			 */
			if (backing_object->ref_count == 1)
				printf("should have called obj deallocate\n");
			backing_object->ref_count--;
			vm_object_unlock(backing_object);

			object_bypasses++;

		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(object, start, end, clean_only)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
	boolean_t clean_only;
{
	register vm_page_t p, next;
	vm_offset_t size;
	int s;

	if (object == NULL)
		return;

	object->paging_in_progress++;
	start = trunc_page(start);
	end = round_page(end);
again:
	size = end - start;
	if (size > 4 * PAGE_SIZE || size >= object->size / 4) {
		for (p = object->memq.tqh_first; p != NULL; p = next) {
			next = p->listq.tqe_next;
			if ((start <= p->offset) && (p->offset < end)) {
				s = splhigh();
				if (p->bmapped) {
					splx(s);
					continue;
				}
				if ((p->flags & PG_BUSY) || p->busy) {
					p->flags |= PG_WANTED;
					tsleep((caddr_t) p, PVM, "vmopar", 0);
					splx(s);
					goto again;
				}
				splx(s);
				if (clean_only) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty)
						continue;
				}
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_lock_queues();
				PAGE_WAKEUP(p);
				vm_page_free(p);
				vm_page_unlock_queues();
			}
		}
	} else {
		while (size > 0) {
			while ((p = vm_page_lookup(object, start)) != 0) {
				s = splhigh();
				if (p->bmapped) {
					splx(s);
					break;
				}
				if ((p->flags & PG_BUSY) || p->busy) {
					p->flags |= PG_WANTED;
					tsleep((caddr_t) p, PVM, "vmopar", 0);
					splx(s);
					goto again;
				}
				splx(s);
				if (clean_only) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty)
						continue;
				}
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_lock_queues();
				PAGE_WAKEUP(p);
				vm_page_free(p);
				vm_page_unlock_queues();
			}
			start += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	vm_object_pip_wakeup(object);
}

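/*
 * Worked example for vm_object_coalesce() below, with illustrative
 * values: prev_offset == 0, prev_size == 2 * PAGE_SIZE and next_size
 * == PAGE_SIZE.  The call removes any stale pages left in
 * [2*PAGE_SIZE, 3*PAGE_SIZE) and grows prev_object->size to
 * 3*PAGE_SIZE, so the single object now backs the combined region and
 * no second object needs to be created.
 */
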
/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *		The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(prev_object, next_object,
    prev_offset, next_offset,
    prev_size, next_size)
	register vm_object_t prev_object;
	vm_object_t next_object;
	vm_offset_t prev_offset, next_offset;
	vm_size_t prev_size, next_size;
{
	vm_size_t newsize;

	if (next_object != NULL) {
		return (FALSE);
	}
	if (prev_object == NULL) {
		return (TRUE);
	}
	vm_object_lock(prev_object);

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 * . more than one reference
	 * . paged out
	 * . shadows another object
	 * . has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry may be
	 * in use anyway)
	 */

	if (prev_object->ref_count > 1 ||
	    prev_object->pager != NULL ||
	    prev_object->shadow != NULL ||
	    prev_object->copy != NULL) {
		vm_object_unlock(prev_object);
		return (FALSE);
	}
	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */

	vm_object_page_remove(prev_object,
	    prev_offset + prev_size,
	    prev_offset + prev_size + next_size, FALSE);

	/*
	 * Extend the object if necessary.
	 */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev_object->size)
		prev_object->size = newsize;

	vm_object_unlock(prev_object);
	return (TRUE);
}

/*
 *	returns page after looking up in shadow chain
 */

vm_page_t
vm_object_page_lookup(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	vm_page_t m;

	if (!(m = vm_page_lookup(object, offset))) {
		if (!object->shadow)
			return 0;
		else
			return vm_object_page_lookup(object->shadow, offset + object->shadow_offset);
	}
	return m;
}

int
_vm_object_in_map(map, object, entry)
	vm_map_t map;
	vm_object_t object;
	vm_map_entry_t entry;
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->is_sub_map || entry->is_a_map) {
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (obj = entry->object.vm_object) {
		for (; obj; obj = obj->shadow)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

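/*
 * vm_object_in_map() below, together with vm_object_check(), forms a
 * leak detector: an OBJ_INTERNAL object that is reachable from no
 * process map and from none of the well-known kernel submaps is
 * suspect.  Both appear intended to be invoked by hand for debugging.
 */
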
int
vm_object_in_map(object)
	vm_object_t object;
{
	struct proc *p;

	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
/*
		if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
			continue;
		}
*/
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0))
			return 1;
	}
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	if (_vm_object_in_map(kmem_map, object, 0))
		return 1;
	if (_vm_object_in_map(pager_map, object, 0))
		return 1;
	if (_vm_object_in_map(buffer_map, object, 0))
		return 1;
	if (_vm_object_in_map(io_map, object, 0))
		return 1;
	if (_vm_object_in_map(phys_map, object, 0))
		return 1;
	if (_vm_object_in_map(mb_map, object, 0))
		return 1;
	if (_vm_object_in_map(u_map, object, 0))
		return 1;
	return 0;
}

void
vm_object_check()
{
	int i;
	int maxhash = 0;
	vm_object_t object;
	vm_object_hash_entry_t entry;

	/*
	 * make sure that no internal objs are hashed
	 */
	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++) {
		int lsize = 0;

		for (entry = vm_object_hashtable[i].tqh_first;
		    entry != NULL;
		    entry = entry->hash_links.tqe_next) {
			if (entry->object->flags & OBJ_INTERNAL) {
				printf("vmochk: internal obj on hash: size: %d\n", entry->object->size);
			}
			++lsize;
		}
		if (lsize > maxhash)
			maxhash = lsize;
	}

	printf("maximum object hash queue size: %d\n", maxhash);

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	for (object = vm_object_list.tqh_first;
	    object != NULL;
	    object = object->object_list.tqe_next) {
		if (object->flags & OBJ_INTERNAL) {
			if (object->ref_count == 0) {
				printf("vmochk: internal obj has zero ref count: %d\n",
				    object->size);
			}
			if (!vm_object_in_map(object)) {
				printf("vmochk: internal obj is not in a map: ref: %d, size: %d, pager: 0x%x, shadow: 0x%x\n", object->ref_count, object->size, object->pager, object->shadow);
			}
		}
	}
}

#define DEBUG
#if defined(DEBUG) || defined(DDB)
/*
 *	vm_object_print:	[ debug ]
 */
void
vm_object_print(object, full)
	vm_object_t object;
	boolean_t full;
{
	register vm_page_t p;

	register int count;

	if (object == NULL)
		return;

	iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
	    (int) object, (int) object->size,
	    object->resident_page_count, object->ref_count);
	printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n",
	    (int) object->pager, (int) object->paging_offset,
	    (int) object->shadow, (int) object->shadow_offset);
	printf("cache: next=%p, prev=%p\n",
	    object->cached_list.tqe_next, object->cached_list.tqe_prev);

	if (!full)
		return;

	indent += 2;
	count = 0;
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		if (count == 0)
			iprintf("memory:=");
		else if (count == 6) {
			printf("\n");
			iprintf(" ...");
			count = 0;
		} else
			printf(",");
		count++;

		printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->offset, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		printf("\n");
	indent -= 2;
}
#endif /* defined(DEBUG) || defined(DDB) */