vm_object.c revision 5203
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_object.c,v 1.12 1994/12/11 01:36:53 davidg Exp $
 */

/*
 *	Virtual memory object module.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>

static void _vm_object_allocate(vm_size_t, vm_object_t);
static void vm_object_rcollapse(vm_object_t, vm_object_t);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */


struct vm_object kernel_object_store;
struct vm_object kmem_object_store;

extern int vm_cache_max;
#define	VM_OBJECT_HASH_COUNT	157

struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT];

long object_collapses = 0;
long object_bypasses = 0;

static void
_vm_object_allocate(size, object)
	vm_size_t size;
	register vm_object_t object;
{
	bzero(object, sizeof *object);
	TAILQ_INIT(&object->memq);
	TAILQ_INIT(&object->reverse_shadow_head);
	vm_object_lock_init(object);
	object->ref_count = 1;
	object->resident_page_count = 0;
	object->size = size;
	object->flags = OBJ_INTERNAL;	/* vm_allocate_with_pager will reset */
	object->paging_in_progress = 0;
	object->copy = NULL;

	/*
	 *	Object starts out read-write, with no pager.
	 */

	object->pager = NULL;
	object->paging_offset = 0;
	object->shadow = NULL;
	object->shadow_offset = (vm_offset_t) 0;

	simple_lock(&vm_object_list_lock);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	cnt.v_nzfod += atop(size);
	simple_unlock(&vm_object_list_lock);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(vm_offset_t nothing)
{
	register int i;

	TAILQ_INIT(&vm_object_cached_list);
	TAILQ_INIT(&vm_object_list);
	vm_object_count = 0;
	simple_lock_init(&vm_cache_lock);
	simple_lock_init(&vm_object_list_lock);

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		TAILQ_INIT(&vm_object_hashtable[i]);

	kernel_object = &kernel_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
			kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
			kmem_object);
}
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

vm_object_t
vm_object_allocate(size)
	vm_size_t size;
{
	register vm_object_t result;

	result = (vm_object_t)
		malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK);


	_vm_object_allocate(size, result);

	return(result);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
inline void
vm_object_reference(object)
	register vm_object_t object;
{
	if (object == NULL)
		return;

	vm_object_lock(object);
	object->ref_count++;
	vm_object_unlock(object);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	vm_object_t object;
{
	vm_object_t temp;

	while (object != NULL) {

		/*
		 *	The cache holds a reference (uncounted) to
		 *	the object; we must lock it before removing
		 *	the object.
		 */

		vm_object_cache_lock();

		/*
		 *	Lose the reference
		 */
		vm_object_lock(object);
		if (--(object->ref_count) != 0) {
			if( object->ref_count == 1)
				vm_object_rcollapse(object->reverse_shadow_head.tqh_first, object);

			vm_object_unlock(object);
			/*
			 *	If there are still references, then
			 *	we are done.
			 */
			vm_object_cache_unlock();
			return;
		}

		/*
		 *	See if this object can persist.  If so, enter
		 *	it in the cache, then deactivate all of its
		 *	pages.
		 */

		if (object->flags & OBJ_CANPERSIST) {

			TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
				cached_list);
			vm_object_cached++;
			vm_object_cache_unlock();

/*
 * this code segment was removed because it kills performance with
 * large -- repetitively used binaries.  The functionality now resides
 * in the pageout daemon
 *			vm_object_deactivate_pages(object);
 */
			vm_object_unlock(object);

			vm_object_cache_trim();
			return;
		}

		/*
		 *	Make sure no one can look us up now.
		 */
		vm_object_remove(object->pager);
		vm_object_cache_unlock();

		temp = object->shadow;
		if( temp)
			TAILQ_REMOVE(&temp->reverse_shadow_head, object, reverse_shadow_list);
		vm_object_terminate(object);
			/* unlocks and deallocates object */
		object = temp;
	}
}
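/*
 * Illustrative sketch (not compiled in): the reference lifecycle the
 * three routines above are meant to implement.  The caller and the size
 * below are hypothetical.
 */
#ifdef notdef
	vm_object_t obj;

	obj = vm_object_allocate(4 * PAGE_SIZE);	/* ref_count == 1 */
	vm_object_reference(obj);	/* a second user: ref_count == 2 */
	vm_object_deallocate(obj);	/* back to 1; object survives */
	vm_object_deallocate(obj);	/* 0: cached if OBJ_CANPERSIST is set,
					   otherwise terminated */
#endif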
/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 */
void
vm_object_terminate(object)
	register vm_object_t object;
{
	register vm_page_t p;
	vm_object_t shadow_object;
	int s;

	/*
	 *	Detach the object from its shadow if we are the shadow's
	 *	copy.
	 */
	if ((shadow_object = object->shadow) != NULL) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object)
			shadow_object->copy = NULL;
/*
		else if (shadow_object->copy != NULL)
			panic("vm_object_terminate: copy/shadow inconsistency");
*/
		vm_object_unlock(shadow_object);
	}

	/*
	 *	Wait until the pageout daemon is through
	 *	with the object.
	 */

	while (object->paging_in_progress) {
		vm_object_sleep((int)object, object, FALSE);
		vm_object_lock(object);
	}

	/*
	 *	While the paging system is locked,
	 *	pull the object's pages off the active
	 *	and inactive queues.  This keeps the
	 *	pageout daemon from playing with them
	 *	during vm_pager_deallocate.
	 *
	 *	We can't free the pages yet, because the
	 *	object's pager may have to write them out
	 *	before deallocating the paging space.
	 */

	for( p = object->memq.tqh_first; p; p=p->listq.tqe_next) {
		VM_PAGE_CHECK(p);

		vm_page_lock_queues();
		s = splhigh();
		if (p->flags & PG_ACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
			p->flags &= ~PG_ACTIVE;
			cnt.v_active_count--;
		}

		if (p->flags & PG_INACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_inactive, p, pageq);
			p->flags &= ~PG_INACTIVE;
			cnt.v_inactive_count--;
		}
		splx(s);
		vm_page_unlock_queues();
	}

	vm_object_unlock(object);

	if (object->paging_in_progress != 0)
		panic("vm_object_terminate: pageout in progress");

	/*
	 *	Clean and free the pages, as appropriate.
	 *	All references to the object are gone,
	 *	so we don't need to lock it.
	 */

	if ((object->flags & OBJ_INTERNAL) == 0) {
		vm_object_lock(object);
		(void) vm_object_page_clean(object, 0, 0, TRUE, TRUE);
		vm_object_unlock(object);
	}

	/*
	 *	Now free the pages.
	 *	For internal objects, this also removes them from paging queues.
	 */
	while ((p = object->memq.tqh_first) != NULL) {
		VM_PAGE_CHECK(p);
		vm_page_lock_queues();
		vm_page_free(p);
		cnt.v_pfree++;
		vm_page_unlock_queues();
	}

	/*
	 *	Let the pager know object is dead.
	 */

	if (object->pager != NULL)
		vm_pager_deallocate(object->pager);


	simple_lock(&vm_object_list_lock);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	/*
	 *	Free the space for the object.
	 */

	free((caddr_t)object, M_VMOBJ);
}
/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
#if 1
boolean_t
vm_object_page_clean(object, start, end, syncio, de_queue)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
	boolean_t syncio;
	boolean_t de_queue;
{
	register vm_page_t p, nextp;
	int size;

	if (object->pager == NULL)
		return 1;

	if (start != end) {
		start = trunc_page(start);
		end = round_page(end);
	}
	size = end - start;

again:
	/*
	 *	Wait until the pageout daemon is through with the object.
	 */
	while (object->paging_in_progress) {
		vm_object_sleep((int)object, object, FALSE);
	}

	nextp = object->memq.tqh_first;
	while ( (p = nextp) && ((start == end) || (size != 0) ) ) {
		nextp = p->listq.tqe_next;
		if (start == end || (p->offset >= start && p->offset < end)) {
			if (p->flags & PG_BUSY)
				continue;

			size -= PAGE_SIZE;

			if ((p->flags & PG_CLEAN)
				&& pmap_is_modified(VM_PAGE_TO_PHYS(p)))
				p->flags &= ~PG_CLEAN;

			if ((p->flags & PG_CLEAN) == 0) {
				vm_pageout_clean(p,VM_PAGEOUT_FORCE);
				goto again;
			}
		}
	}
	wakeup((caddr_t)object);
	return 1;
}
#endif
/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	If syncio is TRUE, page cleaning is done synchronously.
 *	If de_queue is TRUE, pages are removed from any paging queue
 *	they were on, otherwise they are left on whatever queue they
 *	were on before the cleaning operation began.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 *
 *	Returns TRUE if all was well, FALSE if there was a pager error
 *	somewhere.  We attempt to clean (and dequeue) all pages regardless
 *	of where an error occurs.
 */
#if 0
boolean_t
vm_object_page_clean(object, start, end, syncio, de_queue)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
	boolean_t syncio;
	boolean_t de_queue;
{
	register vm_page_t p;
	int onqueue;
	boolean_t noerror = TRUE;

	if (object == NULL)
		return (TRUE);

	/*
	 * If it is an internal object and there is no pager, attempt to
	 * allocate one.  Note that vm_object_collapse may relocate one
	 * from a collapsed object so we must recheck afterward.
	 */
	if ((object->flags & OBJ_INTERNAL) && object->pager == NULL) {
		vm_object_collapse(object);
		if (object->pager == NULL) {
			vm_pager_t pager;

			vm_object_unlock(object);
			pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
						  object->size, VM_PROT_ALL,
						  (vm_offset_t)0);
			if (pager)
				vm_object_setpager(object, pager, 0, FALSE);
			vm_object_lock(object);
		}
	}
	if (object->pager == NULL)
		return (FALSE);

again:
	/*
	 * Wait until the pageout daemon is through with the object.
	 */
	while (object->paging_in_progress) {
		vm_object_sleep((int)object, object, FALSE);
		vm_object_lock(object);
	}
	/*
	 * Loop through the object page list cleaning as necessary.
	 */
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		onqueue = 0;
		if ((start == end || p->offset >= start && p->offset < end) &&
		    !(p->flags & PG_FICTITIOUS)) {
			if ((p->flags & PG_CLEAN) &&
			    pmap_is_modified(VM_PAGE_TO_PHYS(p)))
				p->flags &= ~PG_CLEAN;
			/*
			 * Remove the page from any paging queue.
			 * This needs to be done if either we have been
			 * explicitly asked to do so or it is about to
			 * be cleaned (see comment below).
			 */
			if (de_queue || !(p->flags & PG_CLEAN)) {
				vm_page_lock_queues();
				if (p->flags & PG_ACTIVE) {
					TAILQ_REMOVE(&vm_page_queue_active,
						     p, pageq);
					p->flags &= ~PG_ACTIVE;
					cnt.v_active_count--;
					onqueue = 1;
				} else if (p->flags & PG_INACTIVE) {
					TAILQ_REMOVE(&vm_page_queue_inactive,
						     p, pageq);
					p->flags &= ~PG_INACTIVE;
					cnt.v_inactive_count--;
					onqueue = -1;
				} else
					onqueue = 0;
				vm_page_unlock_queues();
			}
			/*
			 * To ensure the state of the page doesn't change
			 * during the clean operation we do two things.
			 * First we set the busy bit and write-protect all
			 * mappings to ensure that write accesses to the
			 * page block (in vm_fault).  Second, we remove
			 * the page from any paging queue to foil the
			 * pageout daemon (vm_pageout_scan).
			 */
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
			if (!(p->flags & PG_CLEAN)) {
				p->flags |= PG_BUSY;
				object->paging_in_progress++;
				vm_object_unlock(object);
				/*
				 * XXX if put fails we mark the page as
				 * clean to avoid an infinite loop.
				 * Will lose changes to the page.
				 */
				if (vm_pager_put(object->pager, p, syncio)) {
					printf("%s: pager_put error\n",
					       "vm_object_page_clean");
					p->flags |= PG_CLEAN;
					noerror = FALSE;
				}
				vm_object_lock(object);
				object->paging_in_progress--;
				if (!de_queue && onqueue) {
					vm_page_lock_queues();
					if (onqueue > 0)
						vm_page_activate(p);
					else
						vm_page_deactivate(p);
					vm_page_unlock_queues();
				}
				PAGE_WAKEUP(p);
				goto again;
			}
		}
	}
	return (noerror);
}
#endif

/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
void
vm_object_deactivate_pages(object)
	register vm_object_t object;
{
	register vm_page_t p, next;

	for (p = object->memq.tqh_first; p != NULL; p = next) {
		next = p->listq.tqe_next;
		vm_page_lock_queues();
		vm_page_deactivate(p);
		vm_page_unlock_queues();
	}
}

/*
 *	Trim the object cache to size.
 */
void
vm_object_cache_trim()
{
	register vm_object_t object;

	vm_object_cache_lock();
	while (vm_object_cached > vm_cache_max) {
		object = vm_object_cached_list.tqh_first;
		vm_object_cache_unlock();

		if (object != vm_object_lookup(object->pager))
			panic("vm_object_cache_trim: I'm sooo confused.");

		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}
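/*
 * A note on the copy-on-write convention used below: roughly, a resident
 * page is made copy-on-write by write-protecting every physical mapping
 * of it (pmap_page_protect(..., VM_PROT_READ)) and setting
 * PG_COPYONWRITE; the next write access faults, and the copy machinery
 * in vm_fault takes over from there.
 */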
/*
 *	vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
 */
void vm_object_pmap_copy(object, start, end)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
{
	register vm_page_t p;

	if (object == NULL)
		return;

	vm_object_lock(object);
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		if ((start <= p->offset) && (p->offset < end)) {
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
			p->flags |= PG_COPYONWRITE;
		}
	}
	vm_object_unlock(object);
}

/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_remove(object, start, end)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
{
	register vm_page_t p;
	int s;

	if (object == NULL)
		return;
	++object->paging_in_progress;

	vm_object_lock(object);
again:
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		if ((start <= p->offset) && (p->offset < end)) {
			s = splhigh();
			if (p->flags & PG_BUSY) {
				p->flags |= PG_WANTED;
				tsleep((caddr_t) p, PVM, "vmopmr", 0);
				splx(s);
				goto again;
			}
			splx(s);
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
			if ((p->flags & PG_CLEAN) == 0)
				p->flags |= PG_LAUNDRY;
		}
	}
	vm_object_unlock(object);
	--object->paging_in_progress;
	if( object->paging_in_progress == 0)
		wakeup((caddr_t) object);
}
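/*
 * The paging_in_progress handshake seen above recurs throughout this
 * file: the count is raised before pages are manipulated, dropped when
 * done, and a wakeup() on the object address is issued when it reaches
 * zero, so that routines such as vm_object_terminate,
 * vm_object_page_clean and vm_object_collapse can sleep until the
 * object is quiescent.
 */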
/*
 *	vm_object_copy:
 *
 *	Create a new object which is a copy of an existing
 *	object, and mark all of the pages in the existing
 *	object 'copy-on-write'.  The new object has one reference.
 *	Returns the new object.
 *
 *	May defer the copy until later if the object is not backed
 *	up by a non-default pager.
 */
void vm_object_copy(src_object, src_offset, size,
		    dst_object, dst_offset, src_needs_copy)
	register vm_object_t src_object;
	vm_offset_t src_offset;
	vm_size_t size;
	vm_object_t *dst_object;	/* OUT */
	vm_offset_t *dst_offset;	/* OUT */
	boolean_t *src_needs_copy;	/* OUT */
{
	register vm_object_t new_copy;
	register vm_object_t old_copy;
	vm_offset_t new_start, new_end;

	register vm_page_t p;

	if (src_object == NULL) {
		/*
		 *	Nothing to copy
		 */
		*dst_object = NULL;
		*dst_offset = 0;
		*src_needs_copy = FALSE;
		return;
	}


	/*
	 *	If the object's pager is null_pager or the
	 *	default pager, we don't have to make a copy
	 *	of it.  Instead, we set the needs copy flag and
	 *	make a shadow later.
	 */

	vm_object_lock(src_object);

	/*
	 *	Try to collapse the object before copying it.
	 */

	vm_object_collapse(src_object);

	if (src_object->pager == NULL ||
	    src_object->pager->pg_type == PG_SWAP ||
	    (src_object->flags & OBJ_INTERNAL)) {

		/*
		 *	Make another reference to the object
		 */
		src_object->ref_count++;

		/*
		 *	Mark all of the pages copy-on-write.
		 */
		for (p = src_object->memq.tqh_first; p; p = p->listq.tqe_next)
			if (src_offset <= p->offset &&
			    p->offset < src_offset + size)
				p->flags |= PG_COPYONWRITE;
		vm_object_unlock(src_object);

		*dst_object = src_object;
		*dst_offset = src_offset;

		/*
		 *	Must make a shadow when write is desired
		 */
		*src_needs_copy = TRUE;
		return;
	}


	/*
	 *	If the object has a pager, the pager wants to
	 *	see all of the changes.  We need a copy-object
	 *	for the changed pages.
	 *
	 *	If there is a copy-object, and it is empty,
	 *	no changes have been made to the object since the
	 *	copy-object was made.  We can use the same copy-
	 *	object.
	 */

    Retry1:
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);

			/* should spin a bit here... */
			vm_object_lock(src_object);
			goto Retry1;
		}

		if (old_copy->resident_page_count == 0 &&
		    old_copy->pager == NULL) {
			/*
			 *	Return another reference to
			 *	the existing copy-object.
			 */
			old_copy->ref_count++;
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			*dst_object = old_copy;
			*dst_offset = src_offset;
			*src_needs_copy = FALSE;
			return;
		}
		vm_object_unlock(old_copy);
	}
	vm_object_unlock(src_object);

	/*
	 *	If the object has a pager, the pager wants
	 *	to see all of the changes.  We must make
	 *	a copy-object and put the changed pages there.
	 *
	 *	The copy-object is always made large enough to
	 *	completely shadow the original object, since
	 *	it may have several users who want to shadow
	 *	the original object at different points.
	 */

	new_copy = vm_object_allocate(src_object->size);

    Retry2:
	vm_object_lock(src_object);
	/*
	 *	Copy object may have changed while we were unlocked
	 */
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);
			goto Retry2;
		}

		/*
		 *	Consistency check
		 */
		if (old_copy->shadow != src_object ||
		    old_copy->shadow_offset != (vm_offset_t) 0)
			panic("vm_object_copy: copy/shadow inconsistency");

		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		src_object->ref_count--;	/* remove ref. from old_copy */
		if( old_copy->shadow)
			TAILQ_REMOVE(&old_copy->shadow->reverse_shadow_head, old_copy, reverse_shadow_list);
		old_copy->shadow = new_copy;
		TAILQ_INSERT_TAIL(&old_copy->shadow->reverse_shadow_head, old_copy, reverse_shadow_list);
		new_copy->ref_count++;		/* locking not needed - we
						   have the only pointer */
		vm_object_unlock(old_copy);	/* done with old_copy */
	}

	new_start = (vm_offset_t) 0;	/* always shadow original at 0 */
	new_end = (vm_offset_t) new_copy->size;	/* for the whole object */

	/*
	 *	Point the new copy at the existing object.
	 */

	new_copy->shadow = src_object;
	TAILQ_INSERT_TAIL(&new_copy->shadow->reverse_shadow_head, new_copy, reverse_shadow_list);
	new_copy->shadow_offset = new_start;
	src_object->ref_count++;
	src_object->copy = new_copy;

	/*
	 *	Mark all the affected pages of the existing object
	 *	copy-on-write.
	 */
	for (p = src_object->memq.tqh_first; p != NULL; p = p->listq.tqe_next)
		if ((new_start <= p->offset) && (p->offset < new_end))
			p->flags |= PG_COPYONWRITE;

	vm_object_unlock(src_object);

	*dst_object = new_copy;
	*dst_offset = src_offset - new_start;
	*src_needs_copy = FALSE;
}
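/*
 * In practice the vm_map copy/fork code picks between the two strategies
 * above: anonymous (internal or swap-pager backed) objects take the cheap
 * path -- an extra reference plus PG_COPYONWRITE, with the caller creating
 * a shadow object at first write (src_needs_copy) -- while objects with a
 * real pager get an explicit copy-object so the pager continues to observe
 * every change.
 */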
/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */

void
vm_object_shadow(object, offset, length)
	vm_object_t *object;	/* IN/OUT */
	vm_offset_t *offset;	/* IN/OUT */
	vm_size_t length;
{
	register vm_object_t source;
	register vm_object_t result;

	source = *object;

	/*
	 *	Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 *	The new object shadows the source object, adding
	 *	a reference to it.  Our caller changes his reference
	 *	to point to the new object, removing a reference to
	 *	the source object.  Net result: no change of reference
	 *	count.
	 */
	result->shadow = source;
	if (source)
		TAILQ_INSERT_TAIL(&result->shadow->reverse_shadow_head, result, reverse_shadow_list);

	/*
	 *	Store the offset into the source object,
	 *	and fix up the offset into the new object.
	 */

	result->shadow_offset = *offset;

	/*
	 *	Return the new things
	 */

	*offset = 0;
	*object = result;
}

/*
 *	Set the specified object's pager to the specified pager.
 */

void
vm_object_setpager(object, pager, paging_offset,
			read_only)
	vm_object_t object;
	vm_pager_t pager;
	vm_offset_t paging_offset;
	boolean_t read_only;
{
#ifdef	lint
	read_only++;	/* No longer used */
#endif	/* lint */

	vm_object_lock(object);			/* XXX ? */
	if (object->pager && object->pager != pager) {
		panic("!!!pager already allocated!!!\n");
	}
	object->pager = pager;
	object->paging_offset = paging_offset;
	vm_object_unlock(object);		/* XXX ? */
}

/*
 *	vm_object_hash hashes the pager/id pair.
 */

#define vm_object_hash(pager) \
	(((unsigned)pager >> 5)%VM_OBJECT_HASH_COUNT)

/*
 *	vm_object_lookup looks in the object cache for an object with the
 *	specified pager and paging id.
 */

vm_object_t vm_object_lookup(pager)
	vm_pager_t pager;
{
	register vm_object_hash_entry_t entry;
	vm_object_t object;

	cnt.v_lookups++;
	vm_object_cache_lock();

	for (entry = vm_object_hashtable[vm_object_hash(pager)].tqh_first;
	     entry != NULL;
	     entry = entry->hash_links.tqe_next) {
		object = entry->object;
		if (object->pager == pager) {
			vm_object_lock(object);
			if (object->ref_count == 0) {
				TAILQ_REMOVE(&vm_object_cached_list, object,
					cached_list);
				vm_object_cached--;
			}
			object->ref_count++;
			vm_object_unlock(object);
			vm_object_cache_unlock();
			cnt.v_hits++;
			return(object);
		}
	}

	vm_object_cache_unlock();
	return(NULL);
}
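/*
 * Sketch (hypothetical caller, not compiled in) of how pager setup code
 * is expected to use the cache: look the pager up first, and only
 * allocate and enter a fresh object on a miss.
 */
#ifdef notdef
	object = vm_object_lookup(pager);
	if (object == NULL) {
		object = vm_object_allocate(size);
		vm_object_enter(object, pager);	/* marks OBJ_CANPERSIST */
		vm_object_setpager(object, pager, (vm_offset_t)0, FALSE);
	}
#endif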
/*
 *	vm_object_enter enters the specified object/pager/id into
 *	the hash table.
 */

void vm_object_enter(object, pager)
	vm_object_t object;
	vm_pager_t pager;
{
	struct vm_object_hash_head *bucket;
	register vm_object_hash_entry_t entry;

	/*
	 *	We don't cache null objects, and we can't cache
	 *	objects with the null pager.
	 */

	if (object == NULL)
		return;
	if (pager == NULL)
		return;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];
	entry = (vm_object_hash_entry_t)
		malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK);
	entry->object = object;
	object->flags |= OBJ_CANPERSIST;

	vm_object_cache_lock();
	TAILQ_INSERT_TAIL(bucket, entry, hash_links);
	vm_object_cache_unlock();
}

/*
 *	vm_object_remove:
 *
 *	Remove the pager from the hash table.
 *	Note:  This assumes that the object cache
 *	is locked.  XXX this should be fixed
 *	by reorganizing vm_object_deallocate.
 */
void
vm_object_remove(pager)
	register vm_pager_t pager;
{
	struct vm_object_hash_head *bucket;
	register vm_object_hash_entry_t entry;
	register vm_object_t object;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	for (entry = bucket->tqh_first;
	     entry != NULL;
	     entry = entry->hash_links.tqe_next) {
		object = entry->object;
		if (object->pager == pager) {
			TAILQ_REMOVE(bucket, entry, hash_links);
			free((caddr_t)entry, M_VMOBJHASH);
			break;
		}
	}
}
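/*
 * vm_object_rcollapse applies the same page-migration logic as
 * vm_object_qcollapse further below, but is driven from the other
 * direction: vm_object_deallocate calls it when an object's reference
 * count drops to one, giving the object's first reverse-shadow child a
 * chance to absorb the backing object's pages early.
 */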
static void
vm_object_rcollapse(object, sobject)
	register vm_object_t object, sobject;
{
	register vm_object_t backing_object;
	register vm_offset_t backing_offset, new_offset;
	register vm_page_t p, pp;
	register vm_size_t size;
	int s;

	if( !object)
		return;
	backing_object = object->shadow;
	if( backing_object != sobject) {
		printf("backing obj != sobject!!!\n");
		return;
	}
	if( !backing_object)
		return;
	if( (backing_object->flags & OBJ_INTERNAL) == 0)
		return;
	if (backing_object->shadow != NULL &&
	    backing_object->shadow->copy == backing_object)
		return;
	if (backing_object->ref_count != 1)
		return;

	s = splbio();
	while( backing_object->paging_in_progress) {
		tsleep( backing_object, PVM, "rcolow", 0);
	}
	splx(s);

	backing_offset = object->shadow_offset;
	size = object->size;
	p = backing_object->memq.tqh_first;
	while (p) {
		vm_page_t next;
		next = p->listq.tqe_next;

		new_offset = (p->offset - backing_offset);
		if (p->offset < backing_offset ||
		    new_offset >= size) {
			vm_page_lock_queues();
			if( backing_object->pager)
				swap_pager_freespace(backing_object->pager, backing_object->paging_offset + p->offset, PAGE_SIZE);
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
			vm_page_free(p);
			vm_page_unlock_queues();
		} else {
			pp = vm_page_lookup(object, new_offset);
			if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
			    object->paging_offset + new_offset))) {
				vm_page_lock_queues();
				if( backing_object->pager)
					swap_pager_freespace(backing_object->pager, backing_object->paging_offset + p->offset, PAGE_SIZE);
				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
				vm_page_free(p);
				vm_page_unlock_queues();
			} else {
				if (!backing_object->pager || !vm_pager_has_page(backing_object->pager, backing_object->paging_offset + p->offset))
					vm_page_rename(p, object, new_offset);
			}
		}
		p = next;
	}
}

/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(object)
	register vm_object_t object;
{
	register vm_object_t backing_object;
	register vm_offset_t backing_offset, new_offset;
	register vm_page_t p, pp;
	register vm_size_t size;

	backing_object = object->shadow;
	if( !backing_object)
		return;
	if( (backing_object->flags & OBJ_INTERNAL) == 0)
		return;
	if (backing_object->shadow != NULL &&
	    backing_object->shadow->copy == backing_object)
		return;
	if (backing_object->ref_count != 1)
		return;

	backing_offset = object->shadow_offset;
	size = object->size;
	p = backing_object->memq.tqh_first;
	while (p) {
		vm_page_t next;
		next = p->listq.tqe_next;
		if( (p->flags & (PG_BUSY|PG_FAKE|PG_FICTITIOUS)) ||
		    p->hold_count || p->wire_count) {
			p = next;
			continue;
		}

		new_offset = (p->offset - backing_offset);
		if (p->offset < backing_offset ||
		    new_offset >= size) {
			vm_page_lock_queues();
			if( backing_object->pager)
				swap_pager_freespace(backing_object->pager, backing_object->paging_offset + p->offset, PAGE_SIZE);
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
			vm_page_free(p);
			vm_page_unlock_queues();
		} else {
			pp = vm_page_lookup(object, new_offset);
			if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
			    object->paging_offset + new_offset))) {
				vm_page_lock_queues();
				if( backing_object->pager)
					swap_pager_freespace(backing_object->pager, backing_object->paging_offset + p->offset, PAGE_SIZE);
				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
				vm_page_free(p);
				vm_page_unlock_queues();
			} else {
				if (!backing_object->pager || !vm_pager_has_page(backing_object->pager, backing_object->paging_offset + p->offset))
					vm_page_rename(p, object, new_offset);
			}
		}
		p = next;
	}
}
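/*
 * Schematically, vm_object_collapse below has two possible outcomes for
 * a chain  object -> backing_object -> X  (shadow pointers left to
 * right):
 *
 *	collapse: backing_object has ref_count 1, so its resident pages
 *		  and pager are absorbed and the chain becomes
 *		  object -> X
 *	bypass:	  backing_object has other references, but every page of
 *		  it that object can see is already shadowed, so object
 *		  simply skips it:  object -> X  (backing_object -> X
 *		  remains for its other users)
 */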
boolean_t vm_object_collapse_allowed = TRUE;
/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 *
 *	Requires that the object be locked and the page
 *	queues be unlocked.
 *
 *	This routine has significant changes by John S. Dyson
 *	to fix some swap memory leaks.  18 Dec 93
 *
 */
void
vm_object_collapse(object)
	register vm_object_t object;

{
	register vm_object_t backing_object;
	register vm_offset_t backing_offset;
	register vm_size_t size;
	register vm_offset_t new_offset;
	register vm_page_t p, pp;

	if (!vm_object_collapse_allowed)
		return;

	while (TRUE) {
		/*
		 *	Verify that the conditions are right for collapse:
		 *
		 *	The object exists and no pages in it are currently
		 *	being paged out.
		 */
		if (object == NULL)
			return;
		if (object->paging_in_progress != 0) {
			if( object->shadow)
				vm_object_qcollapse(object);
			return;
		}

		/*
		 *		There is a backing object, and
		 */

		if ((backing_object = object->shadow) == NULL)
			return;

		vm_object_lock(backing_object);
		/*
		 *	...
		 *		The backing object is not read_only,
		 *		and no pages in the backing object are
		 *		currently being paged out.
		 *		The backing object is internal.
		 */

		if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_unlock(backing_object);
			vm_object_qcollapse(object);
			return;
		}

		/*
		 *	The backing object can't be a copy-object:
		 *	the shadow_offset for the copy-object must stay
		 *	as 0.  Furthermore (for the 'we have all the
		 *	pages' case), if we bypass backing_object and
		 *	just shadow the next object in the chain, old
		 *	pages from that object would then have to be copied
		 *	BOTH into the (former) backing_object and into the
		 *	parent object.
		 */
		if (backing_object->shadow != NULL &&
		    backing_object->shadow->copy == backing_object) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 * we can deal only with the swap pager
		 */
		if ((object->pager &&
		     object->pager->pg_type != PG_SWAP) ||
		    (backing_object->pager &&
		     backing_object->pager->pg_type != PG_SWAP)) {
			vm_object_unlock(backing_object);
			return;
		}


		/*
		 *	We know that we can either collapse the backing
		 *	object (if the parent is the only reference to
		 *	it) or (perhaps) remove the parent's reference
		 *	to it.
		 */

		backing_offset = object->shadow_offset;
		size = object->size;

		/*
		 *	If there is exactly one reference to the backing
		 *	object, we can collapse it into the parent.
		 */

		if (backing_object->ref_count == 1) {

			/*
			 *	We can collapse the backing object.
			 *
			 *	Move all in-memory pages from backing_object
			 *	to the parent.  Pages that have been paged out
			 *	will be overwritten by any of the parent's
			 *	pages that shadow them.
			 */

			while ((p = backing_object->memq.tqh_first) != 0) {

				new_offset = (p->offset - backing_offset);

				/*
				 *	If the parent has a page here, or if
				 *	this page falls outside the parent,
				 *	dispose of it.
				 *
				 *	Otherwise, move it as planned.
				 */

				if (p->offset < backing_offset ||
				    new_offset >= size) {
					vm_page_lock_queues();
					pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
					vm_page_free(p);
					vm_page_unlock_queues();
				} else {
					pp = vm_page_lookup(object, new_offset);
					if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
					    object->paging_offset + new_offset))) {
						vm_page_lock_queues();
						pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
						vm_page_free(p);
						vm_page_unlock_queues();
					} else {
						vm_page_rename(p, object, new_offset);
					}
				}
			}
			/*
			 *	Move the pager from backing_object to object.
			 */

			if (backing_object->pager) {
				backing_object->paging_in_progress++;
				if (object->pager) {
					vm_pager_t bopager;
					object->paging_in_progress++;
					/*
					 * copy shadow object pages into ours
					 * and destroy unneeded pages in shadow object.
					 */
					bopager = backing_object->pager;
					backing_object->pager = NULL;
					vm_object_remove(bopager);
					swap_pager_copy(
						bopager, backing_object->paging_offset,
						object->pager, object->paging_offset,
						object->shadow_offset);
					object->paging_in_progress--;
					if (object->paging_in_progress == 0)
						wakeup((caddr_t)object);
				} else {
					object->paging_in_progress++;
					/*
					 * grab the shadow objects pager
					 */
					object->pager = backing_object->pager;
					object->paging_offset = backing_object->paging_offset + backing_offset;
					vm_object_remove(backing_object->pager);
					backing_object->pager = NULL;
					/*
					 * free unnecessary blocks
					 */
					swap_pager_freespace(object->pager, 0, object->paging_offset);
					object->paging_in_progress--;
					if (object->paging_in_progress == 0)
						wakeup((caddr_t)object);
				}
				backing_object->paging_in_progress--;
				if (backing_object->paging_in_progress == 0)
					wakeup((caddr_t)backing_object);
			}


			/*
			 *	Object now shadows whatever backing_object did.
			 *	Note that the reference to backing_object->shadow
			 *	moves from within backing_object to within object.
			 */

			TAILQ_REMOVE(&object->shadow->reverse_shadow_head, object, reverse_shadow_list);
			if( backing_object->shadow)
				TAILQ_REMOVE(&backing_object->shadow->reverse_shadow_head, backing_object, reverse_shadow_list);
			object->shadow = backing_object->shadow;
			if( object->shadow)
				TAILQ_INSERT_TAIL(&object->shadow->reverse_shadow_head, object, reverse_shadow_list);

			object->shadow_offset += backing_object->shadow_offset;
			if (object->shadow != NULL &&
			    object->shadow->copy != NULL) {
				panic("vm_object_collapse: we collapsed a copy-object!");
			}
			/*
			 *	Discard backing_object.
			 *
			 *	Since the backing object has no pages, no
			 *	pager left, and no object references within it,
			 *	all that is necessary is to dispose of it.
			 */

			vm_object_unlock(backing_object);

			simple_lock(&vm_object_list_lock);
			TAILQ_REMOVE(&vm_object_list, backing_object,
				object_list);
			vm_object_count--;
			simple_unlock(&vm_object_list_lock);

			free((caddr_t)backing_object, M_VMOBJ);

			object_collapses++;
		}
		else {
			/*
			 *	If all of the pages in the backing object are
			 *	shadowed by the parent object, the parent
			 *	object no longer has to shadow the backing
			 *	object; it can shadow the next one in the
			 *	chain.
			 *
			 *	The backing object must not be paged out - we'd
			 *	have to check all of the paged-out pages, as
			 *	well.
			 */

			if (backing_object->pager != NULL) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 *	Should have a check for a 'small' number
			 *	of pages here.
			 */

			for( p = backing_object->memq.tqh_first;p;p=p->listq.tqe_next) {
				new_offset = (p->offset - backing_offset);

				/*
				 *	If the parent has a page here, or if
				 *	this page falls outside the parent,
				 *	keep going.
				 *
				 *	Otherwise, the backing_object must be
				 *	left in the chain.
				 */

				if (p->offset >= backing_offset &&
				    new_offset <= size &&
				    ((pp = vm_page_lookup(object, new_offset)) == NULL || (pp->flags & PG_FAKE)) &&
				    (!object->pager || !vm_pager_has_page(object->pager, object->paging_offset+new_offset))) {
					/*
					 *	Page still needed.
					 *	Can't go any further.
					 */
					vm_object_unlock(backing_object);
					return;
				}
			}
			/*
			 *	Make the parent shadow the next object
			 *	in the chain.  Deallocating backing_object
			 *	will not remove it, since its reference
			 *	count is at least 2.
			 */

			TAILQ_REMOVE(&object->shadow->reverse_shadow_head, object, reverse_shadow_list);
			vm_object_reference(object->shadow = backing_object->shadow);
			if( object->shadow)
				TAILQ_INSERT_TAIL(&object->shadow->reverse_shadow_head, object, reverse_shadow_list);
			object->shadow_offset += backing_object->shadow_offset;

			/*
			 *	Backing object might have had a copy pointer
			 *	to us.  If it did, clear it.
			 */
			if (backing_object->copy == object) {
				backing_object->copy = NULL;
			}

			/*	Drop the reference count on backing_object.
			 *	Since its ref_count was at least 2, it
			 *	will not vanish; so we don't need to call
			 *	vm_object_deallocate.
			 */
			if (backing_object->ref_count == 1)
				printf("should have called obj deallocate\n");
			backing_object->ref_count--;
			vm_object_unlock(backing_object);

			object_bypasses ++;

		}

		/*
		 *	Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(object, start, end)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
{
	register vm_page_t p, next;
	vm_offset_t size;
	int s;

	if (object == NULL)
		return;

	object->paging_in_progress++;
	start = trunc_page(start);
	end = round_page(end);
again:
	size = end-start;
	if (size > 4*PAGE_SIZE || size >= object->size/4) {
		for (p = object->memq.tqh_first; (p != NULL && size > 0); p = next) {
			next = p->listq.tqe_next;
			if ((start <= p->offset) && (p->offset < end)) {
				s=splhigh();
				if (p->flags & PG_BUSY) {
					p->flags |= PG_WANTED;
					tsleep((caddr_t) p, PVM, "vmopar", 0);
					splx(s);
					goto again;
				}
				splx(s);
				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
				vm_page_lock_queues();
				vm_page_free(p);
				vm_page_unlock_queues();
				size -= PAGE_SIZE;
			}
		}
	} else {
		while (size > 0) {
			while ((p = vm_page_lookup(object, start)) != 0) {
				s = splhigh();
				if (p->flags & PG_BUSY) {
					p->flags |= PG_WANTED;
					tsleep((caddr_t) p, PVM, "vmopar", 0);
					splx(s);
					goto again;
				}
				splx(s);
				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
				vm_page_lock_queues();
				vm_page_free(p);
				vm_page_unlock_queues();
			}
			start += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	--object->paging_in_progress;
	if( object->paging_in_progress == 0)
		wakeup((caddr_t) object);
}
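/*
 * Illustrative sketch (hypothetical caller, not compiled in): the vm_map
 * allocation path is expected to try vm_object_coalesce below when a new
 * anonymous range lands directly after an existing entry, so the two
 * ranges can share one object rather than allocating a second one:
 */
#ifdef notdef
	if (vm_object_coalesce(prev_object, NULL,
	    prev_offset, (vm_offset_t)0, prev_size, next_size)) {
		/* extend the previous map entry instead of creating one */
	}
#endif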
/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t vm_object_coalesce(prev_object, next_object,
			prev_offset, next_offset,
			prev_size, next_size)

	register vm_object_t prev_object;
	vm_object_t next_object;
	vm_offset_t prev_offset, next_offset;
	vm_size_t prev_size, next_size;
{
	vm_size_t newsize;

#ifdef	lint
	next_offset++;
#endif

	if (next_object != NULL) {
		return(FALSE);
	}

	if (prev_object == NULL) {
		return(TRUE);
	}

	vm_object_lock(prev_object);

	/*
	 *	Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 *	Can't coalesce if:
	 *	. more than one reference
	 *	. paged out
	 *	. shadows another object
	 *	. has a copy elsewhere
	 *	(any of which mean that the pages not mapped to
	 *	prev_entry may be in use anyway)
	 */

	if (prev_object->ref_count > 1 ||
		prev_object->pager != NULL ||
		prev_object->shadow != NULL ||
		prev_object->copy != NULL) {
		vm_object_unlock(prev_object);
		return(FALSE);
	}

	/*
	 *	Remove any pages that may still be in the object from
	 *	a previous deallocation.
	 */

	vm_object_page_remove(prev_object,
			prev_offset + prev_size,
			prev_offset + prev_size + next_size);

	/*
	 *	Extend the object if necessary.
	 */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev_object->size)
		prev_object->size = newsize;

	vm_object_unlock(prev_object);
	return(TRUE);
}

/*
 * returns page after looking up in shadow chain
 */

vm_page_t
vm_object_page_lookup(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	vm_page_t m;
	if (!(m=vm_page_lookup(object, offset))) {
		if (!object->shadow)
			return 0;
		else
			return vm_object_page_lookup(object->shadow, offset + object->shadow_offset);
	}
	return m;
}

#define DEBUG
#if defined(DEBUG) || defined(DDB)
/*
 *	vm_object_print:	[ debug ]
 */
void vm_object_print(object, full)
	vm_object_t object;
	boolean_t full;
{
	register vm_page_t p;
	extern indent;

	register int count;

	if (object == NULL)
		return;

	iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
		(int) object, (int) object->size,
		object->resident_page_count, object->ref_count);
	printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n",
	       (int) object->pager, (int) object->paging_offset,
	       (int) object->shadow, (int) object->shadow_offset);
	printf("cache: next=%p, prev=%p\n",
	       object->cached_list.tqe_next, object->cached_list.tqe_prev);

	if (!full)
		return;

	indent += 2;
	count = 0;
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		if (count == 0)
			iprintf("memory:=");
		else if (count == 6) {
			printf("\n");
			iprintf(" ...");
			count = 0;
		} else
			printf(",");
		count++;

		printf("(off=0x%lx,page=0x%lx)",
			(u_long)p->offset, (u_long)VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		printf("\n");
	indent -= 2;
}
#endif /* defined(DEBUG) || defined(DDB) */