vm_object.c revision 7346
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_object.c,v 1.38 1995/03/23 05:19:44 davidg Exp $
 */

/*
 *	Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_kern.h>

static void _vm_object_allocate(vm_size_t, vm_object_t);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;

int vm_object_cache_max;

#define	VM_OBJECT_HASH_COUNT	509

struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT];

long object_collapses = 0;
long object_bypasses = 0;

static void
_vm_object_allocate(size, object)
	vm_size_t size;
	register vm_object_t object;
{
	TAILQ_INIT(&object->memq);
	TAILQ_INIT(&object->reverse_shadow_head);

	object->size = size;
	object->ref_count = 1;
	vm_object_lock_init(object);
	object->flags = OBJ_INTERNAL;	/* pager will reset */
	object->paging_in_progress = 0;
	object->resident_page_count = 0;

	object->pager = NULL;
	object->paging_offset = 0;
	object->shadow = NULL;
	object->shadow_offset = (vm_offset_t) 0;
	object->copy = NULL;

	object->last_read = 0;

	simple_lock(&vm_object_list_lock);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	simple_unlock(&vm_object_list_lock);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(vm_offset_t nothing)
{
	register int i;

	TAILQ_INIT(&vm_object_cached_list);
	TAILQ_INIT(&vm_object_list);
	vm_object_count = 0;
	simple_lock_init(&vm_cache_lock);
	simple_lock_init(&vm_object_list_lock);

	vm_object_cache_max = 84;
	if (cnt.v_page_count > 1000)
		vm_object_cache_max += (cnt.v_page_count - 1000) / 4;

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		TAILQ_INIT(&vm_object_hashtable[i]);

	kernel_object = &kernel_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    kmem_object);
}
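
/*
 * Editor's note: a worked example of the vm_object_cache_max sizing in
 * vm_object_init() above.  The constants 84, 1000, and 4 come from the
 * code; the page count is hypothetical.  On a machine with 4096 pages
 * of managed memory:
 *
 *	vm_object_cache_max = 84 + (4096 - 1000) / 4 = 858
 *
 * Machines with 1000 pages or fewer keep the base value of 84 cached
 * objects.
 */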

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

vm_object_t
vm_object_allocate(size)
	vm_size_t size;
{
	register vm_object_t result;

	result = (vm_object_t)
	    malloc((u_long) sizeof *result, M_VMOBJ, M_WAITOK);

	_vm_object_allocate(size, result);

	return (result);
}

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
inline void
vm_object_reference(object)
	register vm_object_t object;
{
	if (object == NULL)
		return;

	vm_object_lock(object);
	object->ref_count++;
	vm_object_unlock(object);
}
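
/*
 * Editor's note: a minimal sketch of the reference-count lifecycle
 * implemented by the routines above and vm_object_deallocate() below.
 * The caller and the size are hypothetical; the calls are the real
 * interface.
 */
#if 0
	vm_object_t obj;

	obj = vm_object_allocate(round_page(64 * 1024));	/* ref_count == 1 */
	vm_object_reference(obj);				/* ref_count == 2 */
	/* ... hand the second reference to another map entry ... */
	vm_object_deallocate(obj);				/* ref_count == 1 */
	vm_object_deallocate(obj);	/* last reference: the object is
					 * either cached (OBJ_CANPERSIST)
					 * or terminated */
#endif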

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	vm_object_t object;
{
	vm_object_t temp;
	vm_pager_t pager;

	while (object != NULL) {

		if (object->ref_count == 0)
			panic("vm_object_deallocate: object deallocated too many times");

		/*
		 * The cache holds a reference (uncounted) to the object; we
		 * must lock it before removing the object.
		 */

		vm_object_cache_lock();

		/*
		 * Lose the reference
		 */
		vm_object_lock(object);

		object->ref_count--;

		if (object->ref_count != 0) {
			if ((object->ref_count == 1) &&
			    (object->flags & OBJ_INTERNAL)) {
				vm_object_t robject;

				robject = object->reverse_shadow_head.tqh_first;
				if ((robject != NULL) &&
				    (robject->flags & OBJ_INTERNAL)) {
					int s;

					robject->ref_count += 2;
					object->ref_count += 2;

					do {
						s = splhigh();
						while (robject->paging_in_progress) {
							robject->flags |= OBJ_PIPWNT;
							tsleep(robject, PVM, "objde1", 0);
						}

						while (object->paging_in_progress) {
							object->flags |= OBJ_PIPWNT;
							tsleep(object, PVM, "objde2", 0);
						}
						splx(s);

					} while (object->paging_in_progress ||
					    robject->paging_in_progress);

					object->ref_count -= 2;
					robject->ref_count -= 2;
					if (robject->ref_count == 0) {
						vm_object_unlock(object);
						vm_object_cache_unlock();
						robject->ref_count += 1;
						object = robject;
						continue;
					}
					vm_object_cache_unlock();
					vm_object_unlock(object);
					vm_object_lock(robject);
					vm_object_collapse(robject);
					return;
				}
			}
			vm_object_unlock(object);
			/*
			 * If there are still references, then we are done.
			 */
			vm_object_cache_unlock();
			return;
		}

		pager = object->pager;

		if (pager && pager->pg_type == PG_VNODE) {
			vn_pager_t vnp = (vn_pager_t) pager->pg_data;

			vnp->vnp_vp->v_flag &= ~VTEXT;
		}

		/*
		 * See if this object can persist and has some resident
		 * pages.  If so, enter it in the cache.
		 */
		if (object->flags & OBJ_CANPERSIST) {
			if (object->resident_page_count != 0) {
				TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
				    cached_list);
				vm_object_cached++;
				vm_object_cache_unlock();

				vm_object_unlock(object);

				vm_object_cache_trim();
				return;
			} else {
				object->flags &= ~OBJ_CANPERSIST;
			}
		}

		/*
		 * Make sure no one can look us up now.
		 */
		object->flags |= OBJ_DEAD;
		if ((object->flags & OBJ_INTERNAL) == 0)
			vm_object_remove(pager);
		vm_object_cache_unlock();

		temp = object->shadow;
		if (temp)
			TAILQ_REMOVE(&temp->reverse_shadow_head, object,
			    reverse_shadow_list);
		vm_object_terminate(object);
		/* unlocks and deallocates object */
		object = temp;
	}
}
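
/*
 * Editor's note: the while/continue structure in vm_object_deallocate
 * above walks down the shadow chain iteratively rather than recursing.
 * A sketch of the effect, for a hypothetical chain A -> B -> C where
 * each object holds the only reference to its shadow: dropping the
 * last reference to A terminates A, then the loop re-enters with
 * object = B, terminates B, and finally C, all within one call and
 * with bounded stack use.
 */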

/*
 * vm_object_terminate actually destroys the specified object, freeing
 * up all previously used resources.
 *
 * The object must be locked.
 */
void
vm_object_terminate(object)
	register vm_object_t object;
{
	register vm_page_t p, next;
	vm_object_t shadow_object;
	int s;
	struct vnode *vp = NULL;

	/*
	 * Detach the object from its shadow if we are the shadow's copy.
	 */
	if ((shadow_object = object->shadow) != NULL) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object)
			shadow_object->copy = NULL;
#if 0
		else if (shadow_object->copy != NULL)
			panic("vm_object_terminate: copy/shadow inconsistency");
#endif
		vm_object_unlock(shadow_object);
	}
	if (object->pager && (object->pager->pg_type == PG_VNODE)) {
		vn_pager_t vnp = object->pager->pg_data;

		vp = vnp->vnp_vp;
		VOP_LOCK(vp);
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	}

	/*
	 * Wait until the pageout daemon is through with the object.
	 */

	s = splhigh();
	while (object->paging_in_progress) {
		vm_object_unlock(object);
		object->flags |= OBJ_PIPWNT;
		tsleep((caddr_t) object, PVM, "objtrm", 0);
		vm_object_lock(object);
	}
	splx(s);

	/*
	 * While the paging system is locked, pull the object's pages off the
	 * active and inactive queues.  This keeps the pageout daemon from
	 * playing with them during vm_pager_deallocate.
	 *
	 * We can't free the pages yet, because the object's pager may have to
	 * write them out before deallocating the paging space.
	 */

	for (p = object->memq.tqh_first; p; p = next) {
		VM_PAGE_CHECK(p);
		next = p->listq.tqe_next;

		vm_page_lock_queues();
		if (p->flags & PG_CACHE)
			vm_page_free(p);
		else {
			s = splhigh();
			vm_page_unqueue(p);
			splx(s);
		}
		vm_page_unlock_queues();
	}

	if (object->paging_in_progress != 0)
		panic("vm_object_terminate: pageout in progress");

	/*
	 * Clean and free the pages, as appropriate.  All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (vp != NULL) {
		VOP_UNLOCK(vp);
		vm_object_page_clean(object, 0, 0, TRUE);
		VOP_LOCK(vp);
		vinvalbuf(vp, 0, NOCRED, NULL, 0, 0);
		VOP_UNLOCK(vp);
	}

	/*
	 * Now free the pages.  For internal objects, this also removes them
	 * from paging queues.
	 */
	while ((p = object->memq.tqh_first) != NULL) {
		VM_PAGE_CHECK(p);
		vm_page_lock_queues();
		if (p->flags & PG_BUSY)
			printf("vm_object_terminate: freeing busy page\n");
		PAGE_WAKEUP(p);
		vm_page_free(p);
		cnt.v_pfree++;
		vm_page_unlock_queues();
	}
	vm_object_unlock(object);

	/*
	 * Let the pager know object is dead.
	 */
	if (object->pager != NULL)
		vm_pager_deallocate(object->pager);

	simple_lock(&vm_object_list_lock);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	free((caddr_t) object, M_VMOBJ);
}
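
/*
 * Editor's note: the vm_object_page_clean(object, 0, 0, TRUE) call in
 * vm_object_terminate above relies on the routine's documented "odd
 * semantics" (see below): a start == end range means "clean every page
 * in the object", so terminate flushes the entire object back through
 * the vnode before invalidating its buffers.
 */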

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(object, start, end, syncio)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
	boolean_t syncio;
{
	register vm_page_t p, nextp;
	int size;
	int s;

	if (object->pager == NULL)
		return;

	if (start != end) {
		start = trunc_page(start);
		end = round_page(end);
	}
	size = end - start;

again:
	/*
	 * Wait until the pageout daemon is through with the object.
	 */
	s = splhigh();
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		tsleep(object, PVM, "objpcw", 0);
	}
	splx(s);

	nextp = object->memq.tqh_first;
	while ((p = nextp) && ((start == end) || (size != 0))) {
		nextp = p->listq.tqe_next;
		if (start == end || (p->offset >= start && p->offset < end)) {
			if ((p->flags & PG_BUSY) || p->busy) {
				s = splhigh();
				p->flags |= PG_WANTED;
				tsleep(p, PVM, "objpcn", 0);
				splx(s);
				goto again;
			}
			size -= PAGE_SIZE;

			vm_page_test_dirty(p);

			if ((p->dirty & p->valid) != 0) {
				vm_pageout_clean(p, VM_PAGEOUT_FORCE);
				goto again;
			}
		}
	}
	return;
}

/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
void
vm_object_deactivate_pages(object)
	register vm_object_t object;
{
	register vm_page_t p, next;

	for (p = object->memq.tqh_first; p != NULL; p = next) {
		next = p->listq.tqe_next;
		vm_page_lock_queues();
		vm_page_deactivate(p);
		vm_page_unlock_queues();
	}
}

/*
 *	Trim the object cache to size.
 */
void
vm_object_cache_trim()
{
	register vm_object_t object;

	vm_object_cache_lock();
	while (vm_object_cached > vm_object_cache_max) {
		object = vm_object_cached_list.tqh_first;
		vm_object_cache_unlock();

		if (object != vm_object_lookup(object->pager))
			panic("vm_object_cache_trim: I'm sooo confused.");

		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}

/*
 *	vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_copy(object, start, end)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
{
	register vm_page_t p;

	if (object == NULL)
		return;

	vm_object_lock(object);
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		if ((start <= p->offset) && (p->offset < end)) {
			vm_page_protect(p, VM_PROT_READ);
			p->flags |= PG_COPYONWRITE;
		}
	}
	vm_object_unlock(object);
}

/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_remove(object, start, end)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
{
	register vm_page_t p;
	int s;

	if (object == NULL)
		return;
	++object->paging_in_progress;

	vm_object_lock(object);
again:
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		if ((start <= p->offset) && (p->offset < end)) {
			s = splhigh();
			if ((p->flags & PG_BUSY) || p->busy) {
				p->flags |= PG_WANTED;
				tsleep((caddr_t) p, PVM, "vmopmr", 0);
				splx(s);
				goto again;
			}
			splx(s);
			vm_page_protect(p, VM_PROT_NONE);
		}
	}
	vm_object_unlock(object);
	vm_object_pip_wakeup(object);
}
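
/*
 * Editor's note: a sketch contrasting the two pmap routines above; the
 * object and offsets are hypothetical.
 *
 *	vm_object_pmap_copy(obj, 0, 4 * PAGE_SIZE) downgrades existing
 *	mappings of the first four pages to read-only and tags the pages
 *	PG_COPYONWRITE, so a later write faults and copies.
 *
 *	vm_object_pmap_remove(obj, 0, 4 * PAGE_SIZE) removes every
 *	mapping of those pages outright, so even a read must refault
 *	through the object.
 */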

/*
 *	vm_object_copy:
 *
 *	Create a new object which is a copy of an existing
 *	object, and mark all of the pages in the existing
 *	object 'copy-on-write'.  The new object has one reference.
 *	Returns the new object.
 *
 *	May defer the copy until later if the object is not backed
 *	up by a non-default pager.
 */
void
vm_object_copy(src_object, src_offset, size,
    dst_object, dst_offset, src_needs_copy)
	register vm_object_t src_object;
	vm_offset_t src_offset;
	vm_size_t size;
	vm_object_t *dst_object;	/* OUT */
	vm_offset_t *dst_offset;	/* OUT */
	boolean_t *src_needs_copy;	/* OUT */
{
	register vm_object_t new_copy;
	register vm_object_t old_copy;
	vm_offset_t new_start, new_end;

	register vm_page_t p;

	if (src_object == NULL) {
		/*
		 * Nothing to copy
		 */
		*dst_object = NULL;
		*dst_offset = 0;
		*src_needs_copy = FALSE;
		return;
	}

	/*
	 * If the object's pager is null_pager or the default pager, we don't
	 * have to make a copy of it.  Instead, we set the needs copy flag and
	 * make a shadow later.
	 */

	vm_object_lock(src_object);

	/*
	 * Try to collapse the object before copying it.
	 */

	vm_object_collapse(src_object);

	if (src_object->pager == NULL ||
	    (src_object->flags & OBJ_INTERNAL)) {

		/*
		 * Make another reference to the object
		 */
		src_object->ref_count++;

		/*
		 * Mark all of the pages copy-on-write.
		 */
		for (p = src_object->memq.tqh_first; p; p = p->listq.tqe_next)
			if (src_offset <= p->offset &&
			    p->offset < src_offset + size)
				p->flags |= PG_COPYONWRITE;
		vm_object_unlock(src_object);

		*dst_object = src_object;
		*dst_offset = src_offset;

		/*
		 * Must make a shadow when write is desired
		 */
		*src_needs_copy = TRUE;
		return;
	}

	/*
	 * If the object has a pager, the pager wants to see all of the
	 * changes.  We need a copy-object for the changed pages.
	 *
	 * If there is a copy-object, and it is empty, no changes have been
	 * made to the object since the copy-object was made.  We can use the
	 * same copy-object.
	 */

Retry1:
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 * Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);

			/* should spin a bit here... */
			tsleep((caddr_t) old_copy, PVM, "cpylck", 1);
			vm_object_lock(src_object);
			goto Retry1;
		}
		if (old_copy->resident_page_count == 0 &&
		    old_copy->pager == NULL) {
			/*
			 * Return another reference to the existing
			 * copy-object.
			 */
			old_copy->ref_count++;
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			*dst_object = old_copy;
			*dst_offset = src_offset;
			*src_needs_copy = FALSE;
			return;
		}
		vm_object_unlock(old_copy);
	}
	vm_object_unlock(src_object);

	/*
	 * If the object has a pager, the pager wants to see all of the
	 * changes.  We must make a copy-object and put the changed pages
	 * there.
	 *
	 * The copy-object is always made large enough to completely shadow
	 * the original object, since it may have several users who want to
	 * shadow the original object at different points.
	 */

	new_copy = vm_object_allocate(src_object->size);

Retry2:
	vm_object_lock(src_object);
	/*
	 * Copy object may have changed while we were unlocked
	 */
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 * Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);
			tsleep((caddr_t) old_copy, PVM, "cpylck", 1);
			goto Retry2;
		}

		/*
		 * Consistency check
		 */
		if (old_copy->shadow != src_object ||
		    old_copy->shadow_offset != (vm_offset_t) 0)
			panic("vm_object_copy: copy/shadow inconsistency");

		/*
		 * Make the old copy-object shadow the new one.  It will
		 * receive no more pages from the original object.
		 */

		src_object->ref_count--;	/* remove ref. from old_copy */
		if (old_copy->shadow)
			TAILQ_REMOVE(&old_copy->shadow->reverse_shadow_head,
			    old_copy, reverse_shadow_list);
		old_copy->shadow = new_copy;
		TAILQ_INSERT_TAIL(&old_copy->shadow->reverse_shadow_head,
		    old_copy, reverse_shadow_list);
		new_copy->ref_count++;		/* locking not needed - we have
						 * the only pointer */
		vm_object_unlock(old_copy);	/* done with old_copy */
	}
	new_start = (vm_offset_t) 0;		/* always shadow original at 0 */
	new_end = (vm_offset_t) new_copy->size;	/* for the whole object */

	/*
	 * Point the new copy at the existing object.
	 */

	new_copy->shadow = src_object;
	TAILQ_INSERT_TAIL(&new_copy->shadow->reverse_shadow_head, new_copy,
	    reverse_shadow_list);
	new_copy->shadow_offset = new_start;
	src_object->ref_count++;
	src_object->copy = new_copy;

	/*
	 * Mark all the affected pages of the existing object copy-on-write.
	 */
	for (p = src_object->memq.tqh_first; p != NULL; p = p->listq.tqe_next)
		if ((new_start <= p->offset) && (p->offset < new_end))
			p->flags |= PG_COPYONWRITE;

	vm_object_unlock(src_object);

	*dst_object = new_copy;
	*dst_offset = src_offset - new_start;
	*src_needs_copy = FALSE;
}
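
/*
 * Editor's note: a sketch of how a caller consumes vm_object_copy's
 * OUT parameters; the variables are hypothetical, the calls are the
 * real interface.  When the source is internal or pagerless, the
 * routine returns the source itself with *src_needs_copy == TRUE and
 * the copy is deferred; a caller that cannot defer any further can
 * satisfy it immediately with vm_object_shadow() (defined below).
 */
#if 0
	vm_object_t dst;
	vm_offset_t dstoff;
	boolean_t needs_copy;

	vm_object_copy(src, off, len, &dst, &dstoff, &needs_copy);
	if (needs_copy) {
		/* deferred copy: shadow the returned object before writing */
		vm_object_shadow(&dst, &dstoff, len);
	}
#endif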

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */

void
vm_object_shadow(object, offset, length)
	vm_object_t *object;	/* IN/OUT */
	vm_offset_t *offset;	/* IN/OUT */
	vm_size_t length;
{
	register vm_object_t source;
	register vm_object_t result;

	source = *object;

	/*
	 * Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 * The new object shadows the source object, adding a reference to
	 * it.  Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 */
	result->shadow = source;
	if (source)
		TAILQ_INSERT_TAIL(&result->shadow->reverse_shadow_head,
		    result, reverse_shadow_list);

	/*
	 * Store the offset into the source object, and fix up the offset
	 * into the new object.
	 */

	result->shadow_offset = *offset;

	/*
	 * Return the new things
	 */

	*offset = 0;
	*object = result;
}

/*
 *	vm_object_hash hashes the pager/id pair.
 */

#define	vm_object_hash(pager) \
	(((unsigned)pager >> 5) % VM_OBJECT_HASH_COUNT)

/*
 *	vm_object_lookup looks in the object cache for an object with the
 *	specified pager and paging id.
 */

vm_object_t
vm_object_lookup(pager)
	vm_pager_t pager;
{
	register vm_object_hash_entry_t entry;
	vm_object_t object;

	cnt.v_lookups++;
	vm_object_cache_lock();

	for (entry = vm_object_hashtable[vm_object_hash(pager)].tqh_first;
	    entry != NULL;
	    entry = entry->hash_links.tqe_next) {
		object = entry->object;
		if (object->pager == pager) {
			vm_object_lock(object);
			if (object->ref_count == 0) {
				TAILQ_REMOVE(&vm_object_cached_list, object,
				    cached_list);
				vm_object_cached--;
			}
			object->ref_count++;
			vm_object_unlock(object);
			vm_object_cache_unlock();
			cnt.v_hits++;
			return (object);
		}
	}

	vm_object_cache_unlock();
	return (NULL);
}
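
/*
 * Editor's note: a worked example of the vm_object_hash() macro above,
 * with a hypothetical pager address.  The >> 5 discards the low bits
 * (pager structures are aligned, so those bits carry no information),
 * and the modulus selects one of the VM_OBJECT_HASH_COUNT (509, a
 * prime) buckets.  For pager == (vm_pager_t) 0xf0123460:
 *
 *	(0xf0123460 >> 5) % 509 == 125866403 % 509 == 374
 *
 * so the entry lands in bucket 374.
 */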

/*
 *	vm_object_enter enters the specified object/pager/id into
 *	the hash table.
 */

void
vm_object_enter(object, pager)
	vm_object_t object;
	vm_pager_t pager;
{
	struct vm_object_hash_head *bucket;
	register vm_object_hash_entry_t entry;

	/*
	 * We don't cache null objects, and we can't cache objects with the
	 * null pager.
	 */

	if (object == NULL)
		return;
	if (pager == NULL)
		return;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];
	entry = (vm_object_hash_entry_t)
	    malloc((u_long) sizeof *entry, M_VMOBJHASH, M_WAITOK);
	entry->object = object;

	vm_object_cache_lock();
	TAILQ_INSERT_TAIL(bucket, entry, hash_links);
	vm_object_cache_unlock();
}

/*
 *	vm_object_remove:
 *
 *	Remove the pager from the hash table.
 *	Note:  This assumes that the object cache
 *	is locked.  XXX this should be fixed
 *	by reorganizing vm_object_deallocate.
 */
void
vm_object_remove(pager)
	register vm_pager_t pager;
{
	struct vm_object_hash_head *bucket;
	register vm_object_hash_entry_t entry;
	register vm_object_t object;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	for (entry = bucket->tqh_first;
	    entry != NULL;
	    entry = entry->hash_links.tqe_next) {
		object = entry->object;
		if (object->pager == pager) {
			TAILQ_REMOVE(bucket, entry, hash_links);
			free((caddr_t) entry, M_VMOBJHASH);
			break;
		}
	}
}

/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a
 * complete operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(object)
	register vm_object_t object;
{
	register vm_object_t backing_object;
	register vm_offset_t backing_offset, new_offset;
	register vm_page_t p, pp;
	register vm_size_t size;

	backing_object = object->shadow;
	if (backing_object->shadow != NULL &&
	    backing_object->shadow->copy == backing_object)
		return;
	if (backing_object->ref_count != 1)
		return;

	backing_object->ref_count += 2;

	backing_offset = object->shadow_offset;
	size = object->size;
	p = backing_object->memq.tqh_first;
	while (p) {
		vm_page_t next;

		next = p->listq.tqe_next;
		if ((p->flags & (PG_BUSY | PG_FICTITIOUS | PG_CACHE)) ||
		    !p->valid || p->hold_count || p->wire_count || p->busy ||
		    p->bmapped) {
			p = next;
			continue;
		}
		vm_page_protect(p, VM_PROT_NONE);
		new_offset = (p->offset - backing_offset);
		if (p->offset < backing_offset ||
		    new_offset >= size) {
			if (backing_object->pager)
				swap_pager_freespace(backing_object->pager,
				    backing_object->paging_offset + p->offset,
				    PAGE_SIZE);
			vm_page_lock_queues();
			vm_page_free(p);
			vm_page_unlock_queues();
		} else {
			pp = vm_page_lookup(object, new_offset);
			if (pp != NULL || (object->pager &&
			    vm_pager_has_page(object->pager,
			    object->paging_offset + new_offset))) {
				if (backing_object->pager)
					swap_pager_freespace(backing_object->pager,
					    backing_object->paging_offset + p->offset,
					    PAGE_SIZE);
				vm_page_lock_queues();
				vm_page_free(p);
				vm_page_unlock_queues();
			} else {
				if (backing_object->pager)
					swap_pager_freespace(backing_object->pager,
					    backing_object->paging_offset + p->offset,
					    PAGE_SIZE);
				vm_page_rename(p, object, new_offset);
				p->dirty = VM_PAGE_BITS_ALL;
			}
		}
		p = next;
	}
	backing_object->ref_count -= 2;
}
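
/*
 * Editor's note on the ref_count += 2 / -= 2 bracketing in
 * vm_object_qcollapse above: the temporary bias keeps the backing
 * object's reference count above 1 for the duration of the page scan,
 * so that nothing running in between can see a last reference and
 * treat the object as collapsible or reclaimable while its pages are
 * being freed and renamed.  This reading is the editor's; the original
 * code does not spell out the reason.
 */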

boolean_t vm_object_collapse_allowed = TRUE;

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 *
 *	Requires that the object be locked and the page
 *	queues be unlocked.
 *
 *	This routine has significant changes by John S. Dyson
 *	to fix some swap memory leaks.  18 Dec 93
 *
 */
void
vm_object_collapse(object)
	register vm_object_t object;
{
	register vm_object_t backing_object;
	register vm_offset_t backing_offset;
	register vm_size_t size;
	register vm_offset_t new_offset;
	register vm_page_t p, pp;

	if (!vm_object_collapse_allowed)
		return;

	while (TRUE) {
		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and no pages in it are currently being
		 * paged out.
		 */
		if (object == NULL)
			return;

		/*
		 * Make sure there is a backing object.
		 */
		if ((backing_object = object->shadow) == NULL)
			return;

		/*
		 * we check the backing object first, because it is most
		 * likely !OBJ_INTERNAL.
		 */
		if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
		    (backing_object->flags & OBJ_DEAD) ||
		    (object->flags & OBJ_INTERNAL) == 0 ||
		    (object->flags & OBJ_DEAD))
			return;

		if (object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0) {
			if (vm_object_lock_try(backing_object)) {
				vm_object_qcollapse(object);
				vm_object_unlock(backing_object);
			}
			return;
		}

		vm_object_lock(backing_object);

		/*
		 * The backing object can't be a copy-object: the
		 * shadow_offset for the copy-object must stay as 0.
		 * Furthermore (for the 'we have all the pages' case), if we
		 * bypass backing_object and just shadow the next object in
		 * the chain, old pages from that object would then have to
		 * be copied BOTH into the (former) backing_object and into
		 * the parent object.
		 */
		if (backing_object->shadow != NULL &&
		    backing_object->shadow->copy == backing_object) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 * We know that we can either collapse the backing object
		 * (if the parent is the only reference to it) or (perhaps)
		 * remove the parent's reference to it.
		 */

		backing_offset = object->shadow_offset;
		size = object->size;

		/*
		 * If there is exactly one reference to the backing object,
		 * we can collapse it into the parent.
		 */

		if (backing_object->ref_count == 1) {

			backing_object->flags |= OBJ_DEAD;
			/*
			 * We can collapse the backing object.
			 *
			 * Move all in-memory pages from backing_object to
			 * the parent.  Pages that have been paged out will
			 * be overwritten by any of the parent's pages that
			 * shadow them.
			 */

			while ((p = backing_object->memq.tqh_first) != 0) {

				new_offset = (p->offset - backing_offset);

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, dispose of
				 * it.
				 *
				 * Otherwise, move it as planned.
				 */

				if (p->offset < backing_offset ||
				    new_offset >= size) {
					vm_page_lock_queues();
					vm_page_protect(p, VM_PROT_NONE);
					PAGE_WAKEUP(p);
					vm_page_free(p);
					vm_page_unlock_queues();
				} else {
					pp = vm_page_lookup(object, new_offset);
					if (pp != NULL || (object->pager &&
					    vm_pager_has_page(object->pager,
					    object->paging_offset + new_offset))) {
						vm_page_lock_queues();
						vm_page_protect(p, VM_PROT_NONE);
						PAGE_WAKEUP(p);
						vm_page_free(p);
						vm_page_unlock_queues();
					} else {
						vm_page_rename(p, object, new_offset);
					}
				}
			}

			/*
			 * Move the pager from backing_object to object.
			 */

			if (backing_object->pager) {
				backing_object->paging_in_progress++;
				if (object->pager) {
					vm_pager_t bopager;

					object->paging_in_progress++;
					/*
					 * copy shadow object pages into ours
					 * and destroy unneeded pages in
					 * shadow object.
					 */
					bopager = backing_object->pager;
					backing_object->pager = NULL;
					swap_pager_copy(
					    bopager, backing_object->paging_offset,
					    object->pager, object->paging_offset,
					    object->shadow_offset);
					vm_object_pip_wakeup(object);
				} else {
					object->paging_in_progress++;
					/*
					 * grab the shadow objects pager
					 */
					object->pager = backing_object->pager;
					object->paging_offset = backing_object->paging_offset + backing_offset;
					backing_object->pager = NULL;
					/*
					 * free unnecessary blocks
					 */
					swap_pager_freespace(object->pager, 0,
					    object->paging_offset);
					vm_object_pip_wakeup(object);
				}

				vm_object_pip_wakeup(backing_object);
			}

			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to backing_object->shadow
			 * moves from within backing_object to within object.
			 */

			TAILQ_REMOVE(&object->shadow->reverse_shadow_head,
			    object, reverse_shadow_list);
			if (backing_object->shadow)
				TAILQ_REMOVE(&backing_object->shadow->reverse_shadow_head,
				    backing_object, reverse_shadow_list);
			object->shadow = backing_object->shadow;
			if (object->shadow)
				TAILQ_INSERT_TAIL(&object->shadow->reverse_shadow_head,
				    object, reverse_shadow_list);

			object->shadow_offset += backing_object->shadow_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager
			 * left, and no object references within it, all
			 * that is necessary is to dispose of it.
			 */

			vm_object_unlock(backing_object);

			simple_lock(&vm_object_list_lock);
			TAILQ_REMOVE(&vm_object_list, backing_object,
			    object_list);
			vm_object_count--;
			simple_unlock(&vm_object_list_lock);

			free((caddr_t) backing_object, M_VMOBJ);

			object_collapses++;
		} else {
			/*
			 * If all of the pages in the backing object are
			 * shadowed by the parent object, the parent object
			 * no longer has to shadow the backing object; it
			 * can shadow the next one in the chain.
			 *
			 * The backing object must not be paged out - we'd
			 * have to check all of the paged-out pages, as
			 * well.
			 */

			if (backing_object->pager != NULL) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 * Should have a check for a 'small' number of pages
			 * here.
			 */
			for (p = backing_object->memq.tqh_first; p;
			    p = p->listq.tqe_next) {
				new_offset = (p->offset - backing_offset);

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, keep going.
				 *
				 * Otherwise, the backing_object must be
				 * left in the chain.
				 */

				if (p->offset >= backing_offset &&
				    new_offset <= size &&
				    ((pp = vm_page_lookup(object, new_offset)) == NULL ||
				    !pp->valid) &&
				    (!object->pager ||
				    !vm_pager_has_page(object->pager,
				    object->paging_offset + new_offset))) {
					/*
					 * Page still needed.  Can't go any
					 * further.
					 */
					vm_object_unlock(backing_object);
					return;
				}
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not
			 * remove it, since its reference count is at
			 * least 2.
			 */

			TAILQ_REMOVE(&object->shadow->reverse_shadow_head,
			    object, reverse_shadow_list);
			vm_object_reference(object->shadow = backing_object->shadow);
			if (object->shadow)
				TAILQ_INSERT_TAIL(&object->shadow->reverse_shadow_head,
				    object, reverse_shadow_list);
			object->shadow_offset += backing_object->shadow_offset;

			/*
			 * Backing object might have had a copy pointer to
			 * us.  If it did, clear it.
			 */
			if (backing_object->copy == object) {
				backing_object->copy = NULL;
			}

			/*
			 * Drop the reference count on backing_object.  Since
			 * its ref_count was at least 2, it will not vanish;
			 * so we don't need to call vm_object_deallocate.
			 */
			if (backing_object->ref_count == 1)
				printf("should have called obj deallocate\n");
			backing_object->ref_count--;
			vm_object_unlock(backing_object);

			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}
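
/*
 * Editor's note: a sketch of the two outcomes of vm_object_collapse
 * above, for a hypothetical chain in which "object" shadows
 * "backing_object", which in turn shadows a third object "C":
 *
 *	collapse (ref_count == 1):	object -> backing_object -> C
 *	    becomes object -> C; backing_object's pages, pager, and
 *	    shadow reference are absorbed into object, and
 *	    backing_object itself is freed (object_collapses++).
 *
 *	bypass (ref_count > 1, every page already shadowed):  object
 *	    drops its reference to backing_object and references C
 *	    directly; backing_object survives for its other users
 *	    (object_bypasses++).
 */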

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(object, start, end, clean_only)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
	boolean_t clean_only;
{
	register vm_page_t p, next;
	vm_offset_t size;
	int s;

	if (object == NULL)
		return;

	object->paging_in_progress++;
	start = trunc_page(start);
	end = round_page(end);
again:
	size = end - start;
	if (size > 4 * PAGE_SIZE || size >= object->size / 4) {
		for (p = object->memq.tqh_first; p != NULL; p = next) {
			next = p->listq.tqe_next;
			if ((start <= p->offset) && (p->offset < end)) {
				s = splhigh();
				if (p->bmapped) {
					splx(s);
					continue;
				}
				if ((p->flags & PG_BUSY) || p->busy) {
					p->flags |= PG_WANTED;
					tsleep((caddr_t) p, PVM, "vmopar", 0);
					splx(s);
					goto again;
				}
				splx(s);
				if (clean_only) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty)
						continue;
				}
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_lock_queues();
				PAGE_WAKEUP(p);
				vm_page_free(p);
				vm_page_unlock_queues();
			}
		}
	} else {
		while (size > 0) {
			while ((p = vm_page_lookup(object, start)) != 0) {
				s = splhigh();
				if (p->bmapped) {
					splx(s);
					break;
				}
				if ((p->flags & PG_BUSY) || p->busy) {
					p->flags |= PG_WANTED;
					tsleep((caddr_t) p, PVM, "vmopar", 0);
					splx(s);
					goto again;
				}
				splx(s);
				if (clean_only) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty)
						/*
						 * The dirty page stays; move
						 * on to the next offset (a
						 * "continue" here would look
						 * up the same page forever).
						 */
						break;
				}
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_lock_queues();
				PAGE_WAKEUP(p);
				vm_page_free(p);
				vm_page_unlock_queues();
			}
			start += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	vm_object_pip_wakeup(object);
}
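
/*
 * Editor's note: the size test in vm_object_page_remove above picks
 * between two strategies for the same removal; the thresholds are from
 * the code, the object is hypothetical.  For a 1 MB object with 4 kB
 * pages, removing a 64 kB range (16 pages) exceeds 4 * PAGE_SIZE, so
 * the resident page list is walked once in full; removing a single
 * 4 kB page instead does one vm_page_lookup() per page offset, which
 * is cheaper than scanning a potentially long memq.
 */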

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(prev_object, next_object,
    prev_offset, next_offset,
    prev_size, next_size)
	register vm_object_t prev_object;
	vm_object_t next_object;
	vm_offset_t prev_offset, next_offset;
	vm_size_t prev_size, next_size;
{
	vm_size_t newsize;

	if (next_object != NULL) {
		return (FALSE);
	}
	if (prev_object == NULL) {
		return (TRUE);
	}
	vm_object_lock(prev_object);

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 * . more than one reference
	 * . paged out
	 * . shadows another object
	 * . has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry may be
	 * in use anyway)
	 */

	if (prev_object->ref_count > 1 ||
	    prev_object->pager != NULL ||
	    prev_object->shadow != NULL ||
	    prev_object->copy != NULL) {
		vm_object_unlock(prev_object);
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	vm_object_page_remove(prev_object,
	    prev_offset + prev_size,
	    prev_offset + prev_size + next_size, FALSE);

	/*
	 * Extend the object if necessary.
	 */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev_object->size)
		prev_object->size = newsize;

	vm_object_unlock(prev_object);
	return (TRUE);
}

/*
 * returns page after looking up in shadow chain
 */

vm_page_t
vm_object_page_lookup(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	vm_page_t m;

	if (!(m = vm_page_lookup(object, offset))) {
		if (!object->shadow)
			return 0;
		else
			return vm_object_page_lookup(object->shadow,
			    offset + object->shadow_offset);
	}
	return m;
}
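
/*
 * Editor's note: a worked example of the coalesce arithmetic in
 * vm_object_coalesce above, with hypothetical values.  An anonymous
 * region backed by prev_object at prev_offset = 0 with prev_size =
 * 8 * PAGE_SIZE grows by next_size = 2 * PAGE_SIZE of adjoining
 * memory: any stale pages at offsets [8, 10) * PAGE_SIZE are removed,
 * newsize becomes 10 * PAGE_SIZE, and since that exceeds the old
 * object size the object is extended in place.  The caller gets TRUE
 * back and never needs a second object.
 */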

int
_vm_object_in_map(map, object, entry)
	vm_map_t map;
	vm_object_t object;
	vm_map_entry_t entry;
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->is_sub_map || entry->is_a_map) {
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->shadow)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

int
vm_object_in_map(object)
	vm_object_t object;
{
	struct proc *p;

	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
/*
		if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
			continue;
		}
*/
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0))
			return 1;
	}
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	if (_vm_object_in_map(kmem_map, object, 0))
		return 1;
	if (_vm_object_in_map(pager_map, object, 0))
		return 1;
	if (_vm_object_in_map(buffer_map, object, 0))
		return 1;
	if (_vm_object_in_map(io_map, object, 0))
		return 1;
	if (_vm_object_in_map(phys_map, object, 0))
		return 1;
	if (_vm_object_in_map(mb_map, object, 0))
		return 1;
	if (_vm_object_in_map(u_map, object, 0))
		return 1;
	return 0;
}
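
/*
 * Editor's note: vm_object_in_map above is a debugging aid used by
 * vm_object_check below.  It answers "is this object reachable from
 * any process map or any kernel submap?" by walking every map entry,
 * recursing through share maps and submaps, and following each
 * entry's shadow chain.  An internal object with a nonzero reference
 * count that this walk cannot find is a leak candidate.
 */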

void
vm_object_check()
{
	int i;
	int maxhash = 0;
	vm_object_t object;
	vm_object_hash_entry_t entry;

	/*
	 * make sure that no internal objs are hashed
	 */
	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++) {
		int lsize = 0;

		for (entry = vm_object_hashtable[i].tqh_first;
		    entry != NULL;
		    entry = entry->hash_links.tqe_next) {
			if (entry->object->flags & OBJ_INTERNAL) {
				printf("vmochk: internal obj on hash: size: %d\n",
				    entry->object->size);
			}
			++lsize;
		}
		if (lsize > maxhash)
			maxhash = lsize;
	}

	printf("maximum object hash queue size: %d\n", maxhash);

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	for (object = vm_object_list.tqh_first;
	    object != NULL;
	    object = object->object_list.tqe_next) {
		if (object->flags & OBJ_INTERNAL) {
			if (object->ref_count == 0) {
				printf("vmochk: internal obj has zero ref count: %d\n",
				    object->size);
			}
			if (!vm_object_in_map(object)) {
				printf("vmochk: internal obj is not in a map: ref: %d, size: %d, pager: 0x%x, shadow: 0x%x\n",
				    object->ref_count, object->size,
				    object->pager, object->shadow);
			}
		}
	}
}

#define DEBUG
#if defined(DEBUG) || defined(DDB)
/*
 *	vm_object_print:	[ debug ]
 */
void
vm_object_print(object, full)
	vm_object_t object;
	boolean_t full;
{
	register vm_page_t p;
	register int count;

	if (object == NULL)
		return;

	iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
	    (int) object, (int) object->size,
	    object->resident_page_count, object->ref_count);
	printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n",
	    (int) object->pager, (int) object->paging_offset,
	    (int) object->shadow, (int) object->shadow_offset);
	printf("cache: next=%p, prev=%p\n",
	    object->cached_list.tqe_next, object->cached_list.tqe_prev);

	if (!full)
		return;

	indent += 2;
	count = 0;
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		if (count == 0)
			iprintf("memory:=");
		else if (count == 6) {
			printf("\n");
			iprintf(" ...");
			count = 0;
		} else
			printf(",");
		count++;

		printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->offset, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		printf("\n");
	indent -= 2;
}
#endif				/* defined(DEBUG) || defined(DDB) */