/* vm_object.c revision 28991 */
1/* 2 * Copyright (c) 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * The Mach Operating System project at Carnegie-Mellon University. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * from: @(#)vm_object.c 8.5 (Berkeley) 3/22/94 37 * 38 * 39 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40 * All rights reserved. 41 * 42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 43 * 44 * Permission to use, copy, modify and distribute this software and 45 * its documentation is hereby granted, provided that both the copyright 46 * notice and this permission notice appear in all copies of the 47 * software, derivative works or modified versions, and any portions 48 * thereof, and that both notices appear in supporting documentation. 49 * 50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 53 * 54 * Carnegie Mellon requests users of this software to return to 55 * 56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57 * School of Computer Science 58 * Carnegie Mellon University 59 * Pittsburgh PA 15213-3890 60 * 61 * any improvements or extensions that they make and grant Carnegie the 62 * rights to redistribute these changes. 63 * 64 * $Id: vm_object.c,v 1.95 1997/08/05 00:02:04 dyson Exp $ 65 */ 66 67/* 68 * Virtual memory object module. 
69 */ 70 71#include <sys/param.h> 72#include <sys/systm.h> 73#include <sys/kernel.h> 74#include <sys/proc.h> /* for curproc, pageproc */ 75#include <sys/malloc.h> 76#include <sys/vnode.h> 77#include <sys/mount.h> 78#include <sys/vmmeter.h> 79#include <sys/mman.h> 80 81#include <vm/vm.h> 82#include <vm/vm_param.h> 83#include <vm/vm_prot.h> 84#include <sys/lock.h> 85#include <vm/pmap.h> 86#include <vm/vm_map.h> 87#include <vm/vm_object.h> 88#include <vm/vm_page.h> 89#include <vm/vm_pageout.h> 90#include <vm/vm_pager.h> 91#include <vm/swap_pager.h> 92#include <vm/vm_kern.h> 93#include <vm/vm_extern.h> 94 95static void vm_object_qcollapse __P((vm_object_t object)); 96#ifdef not_used 97static void vm_object_deactivate_pages __P((vm_object_t)); 98#endif 99static void vm_object_terminate __P((vm_object_t)); 100static void vm_object_cache_trim __P((void)); 101 102/* 103 * Virtual memory objects maintain the actual data 104 * associated with allocated virtual memory. A given 105 * page of memory exists within exactly one object. 106 * 107 * An object is only deallocated when all "references" 108 * are given up. Only one "reference" to a given 109 * region of an object should be writeable. 110 * 111 * Associated with each object is a list of all resident 112 * memory pages belonging to that object; this list is 113 * maintained by the "vm_page" module, and locked by the object's 114 * lock. 115 * 116 * Each object also records a "pager" routine which is 117 * used to retrieve (and store) pages to the proper backing 118 * storage. In addition, objects may be backed by other 119 * objects from which they were virtual-copied. 
 *
 * The only items within the object structure which are
 * modified after time of creation are:
 *	reference count		locked by object's lock
 *	pager routine		locked by object's lock
 *
 */

/* Object-cache tuning/state: persistent-but-unreferenced objects park here. */
int vm_object_cache_max;
struct object_q vm_object_cached_list;
static int vm_object_cached;		/* size of cached list */
struct object_q vm_object_list;		/* list of every allocated object */
struct simplelock vm_object_list_lock;
static long vm_object_count;		/* count of all objects */
vm_object_t kernel_object;
vm_object_t kmem_object;
static struct vm_object kernel_object_store;	/* static storage: allocated before malloc works */
static struct vm_object kmem_object_store;
extern int vm_pageout_page_count;

static long object_collapses;		/* statistics: full collapses performed */
static long object_bypasses;		/* statistics: backing-object bypasses performed */
static int next_index;			/* rotating start color for page allocation */

/*
 * _vm_object_allocate:
 *
 *	Initialize an already-allocated (or statically-declared) vm_object
 *	structure of the given pager type and size, give it one reference,
 *	and enter it on the global object list.  Used both by
 *	vm_object_allocate() and directly for the static kernel objects.
 */
void
_vm_object_allocate(type, size, object)
	objtype_t type;
	vm_size_t size;
	register vm_object_t object;
{
	TAILQ_INIT(&object->memq);
	TAILQ_INIT(&object->shadow_head);

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->flags = 0;
	object->behavior = OBJ_NORMAL;
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	/* Stagger page colors between objects to spread cache usage. */
	object->pg_color = next_index;
	next_index = (next_index + PQ_PRIME1) & PQ_L2_MASK;
	object->handle = NULL;
	object->paging_offset = (vm_ooffset_t) 0;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
	object->page_hint = NULL;

	object->last_read = 0;

	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module: the global lists, the cache
 *	limit, and the two statically-allocated kernel objects.
 */
void
vm_object_init()
{
	TAILQ_INIT(&vm_object_cached_list);
	TAILQ_INIT(&vm_object_list);
	simple_lock_init(&vm_object_list_lock);
	vm_object_count = 0;

	/* Scale the object cache with available physical memory. */
	vm_object_cache_max = 84;
	if (cnt.v_page_count > 1000)
		vm_object_cache_max += (cnt.v_page_count - 1000) / 4;

	kernel_object = &kernel_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.  May sleep waiting
 *	for memory (M_WAITOK), so it never returns NULL.
 */
vm_object_t
vm_object_allocate(type, size)
	objtype_t type;
	vm_size_t size;
{
	register vm_object_t result;

	result = (vm_object_t)
	    malloc((u_long) sizeof *result, M_VMOBJ, M_WAITOK);

	_vm_object_allocate(type, size, result);

	return (result);
}

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  An object with a
 *	zero reference count must be a persistent object sitting on the
 *	object cache; taking a reference pulls it back off the cache.
 */
void
vm_object_reference(object)
	register vm_object_t object;
{
	if (object == NULL)
		return;

	if (object->ref_count == 0) {
		if ((object->flags & OBJ_CANPERSIST) == 0)
			panic("vm_object_reference: non-persistent object with 0 ref_count");
		TAILQ_REMOVE(&vm_object_cached_list, object, cached_list);
		vm_object_cached--;
	}
	object->ref_count++;
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	vm_object_t object;
{
	vm_object_t temp;

	/*
	 * Iterative (not recursive) walk down the backing-object chain:
	 * terminating an object drops a reference on its backing object,
	 * which may in turn become terminable.
	 */
	while (object != NULL) {

		if (object->ref_count == 0)
			panic("vm_object_deallocate: object deallocated too many times");

		/*
		 * Lose the reference
		 */
		object->ref_count--;
		if (object->ref_count != 0) {
			/*
			 * Special case: if the only remaining reference is a
			 * single anonymous (default/swap) shadow, try to
			 * collapse that shadow into this object now rather
			 * than leaving a chain behind.
			 */
			if ((object->ref_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;
				robject = TAILQ_FIRST(&object->shadow_head);
				if ((robject != NULL) &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {
					int s;
					/*
					 * Take transient extra references on
					 * both objects so neither can vanish
					 * while we sleep waiting for paging
					 * activity to drain.
					 */
					robject->ref_count += 2;
					object->ref_count += 2;

					do {
						s = splvm();
						while (robject->paging_in_progress) {
							robject->flags |= OBJ_PIPWNT;
							tsleep(robject, PVM, "objde1", 0);
						}

						while (object->paging_in_progress) {
							object->flags |= OBJ_PIPWNT;
							tsleep(object, PVM, "objde2", 0);
						}
						splx(s);

					} while( object->paging_in_progress || robject->paging_in_progress);

					object->ref_count -= 2;
					robject->ref_count -= 2;
					if( robject->ref_count == 0) {
						/* Shadow died while we slept: deallocate it instead. */
						robject->ref_count += 1;
						object = robject;
						continue;
					}
					vm_object_collapse(robject);
					return;
				}
			}
			/*
			 * If there are still references, then we are done.
			 */
			return;
		}

		/* Last reference gone: a mapped vnode is no longer a running text image. */
		if (object->type == OBJT_VNODE) {
			struct vnode *vp = object->handle;

			vp->v_flag &= ~VTEXT;
		}

		/*
		 * See if this object can persist and has some resident
		 * pages.  If so, enter it in the cache instead of
		 * destroying it.
		 */
		if (object->flags & OBJ_CANPERSIST) {
			if (object->resident_page_count != 0) {
#if 0
				vm_object_page_clean(object, 0, 0 ,TRUE, TRUE);
#endif
				TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
				    cached_list);
				vm_object_cached++;

				vm_object_cache_trim();
				return;
			} else {
				object->flags &= ~OBJ_CANPERSIST;
			}
		}

		/*
		 * Make sure no one uses us.
		 */
		object->flags |= OBJ_DEAD;

		/* Unlink from the backing object before terminating. */
		temp = object->backing_object;
		if (temp) {
			TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
			--temp->shadow_count;
		}
		vm_object_terminate(object);
		/* unlocks and deallocates object */
		object = temp;
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources: resident pages, pager backing
 *	store, and the object structure itself.
 *
 *	The object must be locked.
 */
static void
vm_object_terminate(object)
	register vm_object_t object;
{
	register vm_page_t p;
	int s;

	if (object->flags & OBJ_VFS_REF)
		panic("vm_object_deallocate: freeing VFS_REF'ed object");

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	s = splvm();
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		tsleep(object, PVM, "objtrm", 0);
	}
	splx(s);

	if (object->paging_in_progress != 0)
		panic("vm_object_deallocate: pageout in progress");

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.  For a vnode
	 * object, flush dirty pages and invalidate its buffers while
	 * holding the vnode lock (unless the caller already holds it).
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = object->handle;
		struct proc *p = curproc;	/* XXX */
		int waslocked;

		waslocked = VOP_ISLOCKED(vp);
		if (!waslocked)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		vm_object_page_clean(object, 0, 0, TRUE, FALSE);
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
		if (!waslocked)
			VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Now free the pages.
	 * For internal objects, this also removes them
	 * from paging queues.
	 */
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		/* Should not happen: paging was drained above. */
		if (p->busy || (p->flags & PG_BUSY))
			printf("vm_object_terminate: freeing busy page\n");
		PAGE_WAKEUP(p);
		vm_page_free(p);
		cnt.v_pfree++;
	}

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	simple_lock(&vm_object_list_lock);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	/* Wake anyone sleeping on the object address (e.g. OBJ_PIPWNT waiters). */
	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	free((caddr_t) object, M_VMOBJ);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *	Dirty neighbors of each dirty page are gathered into clusters
 *	of up to vm_pageout_page_count pages before being flushed.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(object, start, end, syncio, lockflag)
	vm_object_t object;
	vm_pindex_t start;
	vm_pindex_t end;
	boolean_t syncio;
	boolean_t lockflag;	/* take/release the vnode lock here? */
{
	register vm_page_t p, np, tp;
	register vm_offset_t tstart, tend;
	vm_pindex_t pi;
	int s;
	struct vnode *vp;
	int runlen;
	int maxf;		/* count of dirty pages found after p */
	int chkb;		/* how many backward neighbors to probe */
	int maxb;		/* count of dirty pages found before p */
	int i;
	vm_page_t maf[vm_pageout_page_count];	/* forward run */
	vm_page_t mab[vm_pageout_page_count];	/* backward run */
	vm_page_t ma[vm_pageout_page_count];	/* assembled cluster */
	struct proc *pproc = curproc;	/* XXX */

	/* Only vnode-backed objects that might be dirty need cleaning. */
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	vp = object->handle;

	if (lockflag)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, pproc);
	object->flags |= OBJ_CLEANING;

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}
	/* A whole-object clean clears the dirtiness hints up front. */
	if ((tstart == 0) && (tend == object->size)) {
		object->flags &= ~(OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	}
	/*
	 * Mark every resident page; PG_CLEANCHK is stripped as pages are
	 * examined, so a rescan after sleeping never reprocesses a page.
	 */
	for(p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq))
		p->flags |= PG_CLEANCHK;

rescan:
	for(p = TAILQ_FIRST(&object->memq); p; p = np) {
		np = TAILQ_NEXT(p, listq);

		pi = p->pindex;
		if (((p->flags & PG_CLEANCHK) == 0) ||
			(pi < tstart) || (pi >= tend) ||
			(p->valid == 0) ||
			((p->queue - p->pc) == PQ_CACHE)) {
			p->flags &= ~PG_CLEANCHK;
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			p->flags &= ~PG_CLEANCHK;
			continue;
		}

		/* Wait for a busy page, then restart: the list may have changed. */
		s = splvm();
		if ((p->flags & PG_BUSY) || p->busy) {
			p->flags |= PG_WANTED|PG_REFERENCED;
			tsleep(p, PVM, "vpcwai", 0);
			splx(s);
			goto rescan;
		}
		splx(s);

		/* Gather the run of dirty pages immediately after p. */
		s = splvm();
		maxf = 0;
		for(i=1;i<vm_pageout_page_count;i++) {
			if (tp = vm_page_lookup(object, pi + i)) {
				if ((tp->flags & PG_BUSY) ||
					(tp->flags & PG_CLEANCHK) == 0)
					break;
				if((tp->queue - tp->pc) == PQ_CACHE) {
					tp->flags &= ~PG_CLEANCHK;
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					tp->flags &= ~PG_CLEANCHK;
					break;
				}
				maf[ i - 1 ] = tp;
				maxf++;
				continue;
			}
			break;
		}

		/* Gather the run of dirty pages immediately before p. */
		maxb = 0;
		chkb = vm_pageout_page_count -  maxf;
		if (chkb) {
			for(i = 1; i < chkb;i++) {
				if (tp = vm_page_lookup(object, pi - i)) {
					if ((tp->flags & PG_BUSY) ||
						(tp->flags & PG_CLEANCHK) == 0)
						break;
					if((tp->queue - tp->pc) == PQ_CACHE) {
						tp->flags &= ~PG_CLEANCHK;
						break;
					}
					vm_page_test_dirty(tp);
					if ((tp->dirty & tp->valid) == 0) {
						tp->flags &= ~PG_CLEANCHK;
						break;
					}
					mab[ i - 1 ] = tp;
					maxb++;
					continue;
				}
				break;
			}
		}

		/*
		 * Assemble the cluster in ascending pindex order:
		 * backward run (reversed), then p itself.
		 */
		for(i=0;i<maxb;i++) {
			int index = (maxb - i) - 1;
			ma[index] = mab[i];
			ma[index]->flags |= PG_BUSY;
			ma[index]->flags &= ~PG_CLEANCHK;
			vm_page_protect(ma[index], VM_PROT_READ);
		}
		vm_page_protect(p, VM_PROT_READ);
		p->flags |= PG_BUSY;
		p->flags &= ~PG_CLEANCHK;
		ma[maxb] = p;
		/* Append the forward run after p, then flush the cluster. */
		for(i=0;i<maxf;i++) {
			int index = (maxb + i) + 1;
			ma[index] = maf[i];
			ma[index]->flags |= PG_BUSY;
			ma[index]->flags &= ~PG_CLEANCHK;
			vm_page_protect(ma[index], VM_PROT_READ);
		}
		runlen = maxb + maxf + 1;
		splx(s);
		vm_pageout_flush(ma, runlen, 0);
		/* The flush may sleep; rescan from the head of the list. */
		goto rescan;
	}

	VOP_FSYNC(vp, NULL, syncio, curproc);

	if (lockflag)
		VOP_UNLOCK(vp, 0, pproc);
	object->flags &= ~OBJ_CLEANING;
	return;
}

#ifdef not_used
/* XXX I cannot tell if this should be an exported symbol */
/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
static void
vm_object_deactivate_pages(object)
	register vm_object_t object;
{
	register vm_page_t p, next;

	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
		next = TAILQ_NEXT(p, listq);
		vm_page_deactivate(p);
	}
}
#endif

/*
 * Trim the object cache to size: evict objects from the head of the
 * cached list (oldest first) until under the limit.  pager_cache()
 * with FALSE drops the reference we just took and frees the object.
 */
static void
vm_object_cache_trim()
{
	register vm_object_t object;

	while (vm_object_cached > vm_object_cache_max) {
		object = TAILQ_FIRST(&vm_object_cached_list);

		vm_object_reference(object);
		pager_cache(object, FALSE);
	}
}


/*
 *	vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
649 */ 650void 651vm_object_pmap_copy(object, start, end) 652 register vm_object_t object; 653 register vm_pindex_t start; 654 register vm_pindex_t end; 655{ 656 register vm_page_t p; 657 658 if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0) 659 return; 660 661 for (p = TAILQ_FIRST(&object->memq); 662 p != NULL; 663 p = TAILQ_NEXT(p, listq)) { 664 vm_page_protect(p, VM_PROT_READ); 665 } 666 667 object->flags &= ~OBJ_WRITEABLE; 668} 669 670/* 671 * vm_object_pmap_remove: 672 * 673 * Removes all physical pages in the specified 674 * object range from all physical maps. 675 * 676 * The object must *not* be locked. 677 */ 678void 679vm_object_pmap_remove(object, start, end) 680 register vm_object_t object; 681 register vm_pindex_t start; 682 register vm_pindex_t end; 683{ 684 register vm_page_t p; 685 if (object == NULL) 686 return; 687 for (p = TAILQ_FIRST(&object->memq); 688 p != NULL; 689 p = TAILQ_NEXT(p, listq)) { 690 if (p->pindex >= start && p->pindex < end) 691 vm_page_protect(p, VM_PROT_NONE); 692 } 693 if ((start == 0) && (object->size == end)) 694 object->flags &= ~OBJ_WRITEABLE; 695} 696 697/* 698 * vm_object_madvise: 699 * 700 * Implements the madvise function at the object/page level. 
 */
void
vm_object_madvise(object, pindex, count, advise)
	vm_object_t object;
	vm_pindex_t pindex;	/* first page index of the advised range */
	int count;		/* number of pages */
	int advise;		/* MADV_WILLNEED, MADV_DONTNEED or MADV_FREE */
{
	int s;
	vm_pindex_t end, tpindex;
	vm_object_t tobject;
	vm_page_t m;

	if (object == NULL)
		return;

	end = pindex + count;

	for (; pindex < end; pindex += 1) {

relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * Find the page, chasing down the backing-object chain for
		 * anonymous objects that are the chain's sole reference.
		 */
		m = vm_page_lookup(tobject, tpindex);
		if (m == NULL) {
			if (tobject->type != OBJT_DEFAULT) {
				continue;
			}

			tobject = tobject->backing_object;
			if ((tobject == NULL) || (tobject->ref_count != 1)) {
				continue;
			}
			/*
			 * NOTE(review): this adds the NEW tobject's
			 * backing_object_offset after the pointer has already
			 * advanced; presumably the offset of the link just
			 * traversed was intended — verify against later
			 * revisions of this file.
			 */
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			goto shadowlookup;
		}

		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  Things can break if we mess with pages
		 * in any of the below states.
		 */
		if (m->hold_count || m->wire_count ||
		    m->valid != VM_PAGE_BITS_ALL) {
			continue;
		}

		/* Sleep on a busy page, then retry this index from scratch. */
		if (m->busy || (m->flags & PG_BUSY)) {
			s = splvm();
			if (m->busy || (m->flags & PG_BUSY)) {
				m->flags |= PG_WANTED;
				tsleep(m, PVM, "madvpw", 0);
			}
			splx(s);
			goto relookup;
		}

		if (advise == MADV_WILLNEED) {
			if (m->queue != PQ_ACTIVE)
				vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_deactivate(m);
		} else if (advise == MADV_FREE) {
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			m->dirty = 0;
			/*
			 * Force a demand zero if attempt to read from swap.
			 * We currently don't handle vnode files correctly,
			 * and will reread stale contents unnecessarily.
			 */
			if (object->type == OBJT_SWAP)
				swap_pager_dmzspace(tobject, m->pindex, 1);
		}
	}
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
784 * 785 * The new object and offset into that object 786 * are returned in the source parameters. 787 */ 788 789void 790vm_object_shadow(object, offset, length) 791 vm_object_t *object; /* IN/OUT */ 792 vm_ooffset_t *offset; /* IN/OUT */ 793 vm_size_t length; 794{ 795 register vm_object_t source; 796 register vm_object_t result; 797 798 source = *object; 799 800 /* 801 * Allocate a new object with the given length 802 */ 803 804 if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL) 805 panic("vm_object_shadow: no object for shadowing"); 806 807 /* 808 * The new object shadows the source object, adding a reference to it. 809 * Our caller changes his reference to point to the new object, 810 * removing a reference to the source object. Net result: no change 811 * of reference count. 812 */ 813 result->backing_object = source; 814 if (source) { 815 TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list); 816 ++source->shadow_count; 817 } 818 819 /* 820 * Store the offset into the source object, and fix up the offset into 821 * the new object. 822 */ 823 824 result->backing_object_offset = *offset; 825 826 /* 827 * Return the new things 828 */ 829 830 *offset = 0; 831 *object = result; 832} 833 834 835/* 836 * this version of collapse allows the operation to occur earlier and 837 * when paging_in_progress is true for an object... This is not a complete 838 * operation, but should plug 99.9% of the rest of the leaks. 
 */
static void
vm_object_qcollapse(object)
	register vm_object_t object;
{
	register vm_object_t backing_object;
	register vm_pindex_t backing_offset_index, paging_offset_index;
	vm_pindex_t backing_object_paging_offset_index;
	vm_pindex_t new_pindex;
	register vm_page_t p, pp;
	register vm_size_t size;

	backing_object = object->backing_object;
	/* Only safe when the parent holds the sole reference. */
	if (backing_object->ref_count != 1)
		return;

	/* Pin the backing object while we walk its page list. */
	backing_object->ref_count += 2;

	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
	backing_object_paging_offset_index = OFF_TO_IDX(backing_object->paging_offset);
	paging_offset_index = OFF_TO_IDX(object->paging_offset);
	size = object->size;
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next;

		next = TAILQ_NEXT(p, listq);
		/* Skip pages we must not touch (busy, wired, cached, invalid...). */
		if ((p->flags & (PG_BUSY | PG_FICTITIOUS)) ||
		    ((p->queue - p->pc) == PQ_CACHE) ||
		    !p->valid || p->hold_count || p->wire_count || p->busy) {
			p = next;
			continue;
		}
		new_pindex = p->pindex - backing_offset_index;
		if (p->pindex < backing_offset_index ||
		    new_pindex >= size) {
			/* Page falls outside the parent's window: discard it. */
			if (backing_object->type == OBJT_SWAP)
				swap_pager_freespace(backing_object,
				    backing_object_paging_offset_index+p->pindex,
				    1);
			vm_page_protect(p, VM_PROT_NONE);
			vm_page_free(p);
		} else {
			pp = vm_page_lookup(object, new_pindex);
			if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
				paging_offset_index + new_pindex, NULL, NULL))) {
				/* Parent already shadows this page: discard the backing copy. */
				if (backing_object->type == OBJT_SWAP)
					swap_pager_freespace(backing_object,
					    backing_object_paging_offset_index + p->pindex, 1);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			} else {
				/*
				 * Move the page up into the parent.  It is
				 * marked fully dirty because its swap copy
				 * (if any) was just released.
				 */
				if (backing_object->type == OBJT_SWAP)
					swap_pager_freespace(backing_object,
					    backing_object_paging_offset_index + p->pindex, 1);
				vm_page_rename(p, object, new_pindex);
				vm_page_protect(p, VM_PROT_NONE);
				p->dirty =
				    VM_PAGE_BITS_ALL;
			}
		}
		p = next;
	}
	/* Drop the transient pin taken above. */
	backing_object->ref_count -= 2;
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(object)
	vm_object_t object;

{
	vm_object_t backing_object;
	vm_ooffset_t backing_offset;
	vm_size_t size;
	vm_pindex_t new_pindex, backing_offset_index;
	vm_page_t p, pp;

	while (TRUE) {
		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and no pages in it are currently being paged
		 * out.
		 */
		if (object == NULL)
			return;

		/*
		 * Make sure there is a backing object.
		 */
		if ((backing_object = object->backing_object) == NULL)
			return;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsable.  Only anonymous (default/swap) objects
		 * without a handle, and not already dying, may take part.
		 */
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			return;
		}

		/* Paging in flight: fall back to the partial quick-collapse. */
		if (object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_qcollapse(object);
			return;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) remove
		 * the parent's reference to it.
		 */

		backing_offset = object->backing_object_offset;
		backing_offset_index = OFF_TO_IDX(backing_offset);
		size = object->size;

		/*
		 * If there is exactly one reference to the backing object, we
		 * can collapse it into the parent.
		 */
		if (backing_object->ref_count == 1) {

			backing_object->flags |= OBJ_DEAD;
			/*
			 * We can collapse the backing object.
			 *
			 * Move all in-memory pages from backing_object to the
			 * parent.  Pages that have been paged out will be
			 * overwritten by any of the parent's pages that
			 * shadow them.
			 */
			while ((p = TAILQ_FIRST(&backing_object->memq)) != 0) {

				new_pindex = p->pindex - backing_offset_index;

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, dispose of
				 * it.
				 *
				 * Otherwise, move it as planned.
				 */
				if (p->pindex < backing_offset_index ||
				    new_pindex >= size) {
					vm_page_protect(p, VM_PROT_NONE);
					PAGE_WAKEUP(p);
					vm_page_free(p);
				} else {
					pp = vm_page_lookup(object, new_pindex);
					if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
					    OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL))) {
						vm_page_protect(p, VM_PROT_NONE);
						PAGE_WAKEUP(p);
						vm_page_free(p);
					} else {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_rename(p, object, new_pindex);
						/* Swap copy, if any, is stale: page is all-dirty now. */
						p->dirty = VM_PAGE_BITS_ALL;
					}
				}
			}

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				backing_object->paging_in_progress++;
				if (object->type == OBJT_SWAP) {
					object->paging_in_progress++;
					/*
					 * copy shadow object pages into ours
					 * and destroy unneeded pages in
					 * shadow object.
					 */
					swap_pager_copy(
					    backing_object,
					    OFF_TO_IDX(backing_object->paging_offset),
					    object,
					    OFF_TO_IDX(object->paging_offset),
					    OFF_TO_IDX(object->backing_object_offset));
					vm_object_pip_wakeup(object);
				} else {
					object->paging_in_progress++;
					/*
					 * move the shadow backing_object's pager data to
					 * "object" and convert "object" type to OBJT_SWAP.
					 */
					object->type = OBJT_SWAP;
					object->un_pager.swp.swp_nblocks =
					    backing_object->un_pager.swp.swp_nblocks;
					object->un_pager.swp.swp_allocsize =
					    backing_object->un_pager.swp.swp_allocsize;
					object->un_pager.swp.swp_blocks =
					    backing_object->un_pager.swp.swp_blocks;
					object->un_pager.swp.swp_poip =		/* XXX */
					    backing_object->un_pager.swp.swp_poip;
					object->paging_offset = backing_object->paging_offset + backing_offset;
					TAILQ_INSERT_TAIL(&swap_pager_un_object_list, object, pager_object_list);

					/*
					 * Convert backing object from OBJT_SWAP to
					 * OBJT_DEFAULT.  XXX - only the TAILQ_REMOVE is
					 * actually necessary.
					 */
					backing_object->type = OBJT_DEFAULT;
					TAILQ_REMOVE(&swap_pager_un_object_list, backing_object, pager_object_list);
					/*
					 * free unnecessary blocks
					 */
					swap_pager_freespace(object, 0,
					    OFF_TO_IDX(object->paging_offset));
					vm_object_pip_wakeup(object);
				}

				vm_object_pip_wakeup(backing_object);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to backing_object->backing_object
			 * moves from within backing_object to within object.
			 */
			TAILQ_REMOVE(&object->backing_object->shadow_head, object,
			    shadow_list);
			--object->backing_object->shadow_count;
			if (backing_object->backing_object) {
				TAILQ_REMOVE(&backing_object->backing_object->shadow_head,
				    backing_object, shadow_list);
				--backing_object->backing_object->shadow_count;
			}
			object->backing_object = backing_object->backing_object;
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
				    object, shadow_list);
				++object->backing_object->shadow_count;
			}

			object->backing_object_offset += backing_object->backing_object_offset;
			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			TAILQ_REMOVE(&vm_object_list, backing_object,
			    object_list);
			vm_object_count--;

			free((caddr_t) backing_object, M_VMOBJ);

			object_collapses++;
		} else {
			/*
			 * If all of the pages in the backing object are
			 * shadowed by the parent object, the parent object no
			 * longer has to shadow the backing object; it can
			 * shadow the next one in the chain.
			 *
			 * The backing object must not be paged out - we'd have
			 * to check all of the paged-out pages, as well.
			 */
			if (backing_object->type != OBJT_DEFAULT) {
				return;
			}
			/*
			 * Should have a check for a 'small' number of pages
			 * here.
			 */
			for (p = TAILQ_FIRST(&backing_object->memq); p; p = TAILQ_NEXT(p, listq)) {
				new_pindex = p->pindex - backing_offset_index;

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, keep going.
				 *
				 * Otherwise, the backing_object must be left in
				 * the chain.
				 *
				 * NOTE(review): "new_pindex <= size" is
				 * asymmetric with the ">= size" test used in
				 * the collapse path above; presumably "< size"
				 * was meant (the <= form merely checks one
				 * extra page) — verify against later revisions.
				 */
				if (p->pindex >= backing_offset_index &&
				    new_pindex <= size) {

					pp = vm_page_lookup(object, new_pindex);

					if ((pp == NULL || pp->valid == 0) &&
					    !vm_pager_has_page(object, OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL)) {
						/*
						 * Page still needed.  Can't go any
						 * further.
						 */
						return;
					}
				}
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			TAILQ_REMOVE(&object->backing_object->shadow_head,
			    object, shadow_list);
			--object->backing_object->shadow_count;
			vm_object_reference(object->backing_object = backing_object->backing_object);
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
				    object, shadow_list);
				++object->backing_object->shadow_count;
			}
			object->backing_object_offset += backing_object->backing_object_offset;

			/*
			 * Drop the reference count on backing_object.  Since
			 * its ref_count was at least 2, it will not vanish;
			 * so we don't need to call vm_object_deallocate.
			 */
			if (backing_object->ref_count == 1)
				printf("should have called obj deallocate\n");
			backing_object->ref_count--;

			object_bypasses++;

		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
1199 */ 1200void 1201vm_object_page_remove(object, start, end, clean_only) 1202 register vm_object_t object; 1203 register vm_pindex_t start; 1204 register vm_pindex_t end; 1205 boolean_t clean_only; 1206{ 1207 register vm_page_t p, next; 1208 unsigned int size; 1209 int s; 1210 1211 if (object == NULL) 1212 return; 1213 1214 object->paging_in_progress++; 1215again: 1216 size = end - start; 1217 if (size > 4 || size >= object->size / 4) { 1218 for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) { 1219 next = TAILQ_NEXT(p, listq); 1220 if ((start <= p->pindex) && (p->pindex < end)) { 1221 if (p->wire_count != 0) { 1222 vm_page_protect(p, VM_PROT_NONE); 1223 p->valid = 0; 1224 continue; 1225 } 1226 1227 /* 1228 * The busy flags are only cleared at 1229 * interrupt -- minimize the spl transitions 1230 */ 1231 if ((p->flags & PG_BUSY) || p->busy) { 1232 s = splvm(); 1233 if ((p->flags & PG_BUSY) || p->busy) { 1234 p->flags |= PG_WANTED; 1235 tsleep(p, PVM, "vmopar", 0); 1236 splx(s); 1237 goto again; 1238 } 1239 splx(s); 1240 } 1241 1242 if (clean_only) { 1243 vm_page_test_dirty(p); 1244 if (p->valid & p->dirty) 1245 continue; 1246 } 1247 vm_page_protect(p, VM_PROT_NONE); 1248 PAGE_WAKEUP(p); 1249 vm_page_free(p); 1250 } 1251 } 1252 } else { 1253 while (size > 0) { 1254 if ((p = vm_page_lookup(object, start)) != 0) { 1255 if (p->wire_count != 0) { 1256 p->valid = 0; 1257 vm_page_protect(p, VM_PROT_NONE); 1258 start += 1; 1259 size -= 1; 1260 continue; 1261 } 1262 /* 1263 * The busy flags are only cleared at 1264 * interrupt -- minimize the spl transitions 1265 */ 1266 if ((p->flags & PG_BUSY) || p->busy) { 1267 s = splvm(); 1268 if ((p->flags & PG_BUSY) || p->busy) { 1269 p->flags |= PG_WANTED; 1270 tsleep(p, PVM, "vmopar", 0); 1271 splx(s); 1272 goto again; 1273 } 1274 splx(s); 1275 } 1276 if (clean_only) { 1277 vm_page_test_dirty(p); 1278 if (p->valid & p->dirty) { 1279 start += 1; 1280 size -= 1; 1281 continue; 1282 } 1283 } 1284 vm_page_protect(p, 
VM_PROT_NONE); 1285 PAGE_WAKEUP(p); 1286 vm_page_free(p); 1287 } 1288 start += 1; 1289 size -= 1; 1290 } 1291 } 1292 vm_object_pip_wakeup(object); 1293} 1294 1295/* 1296 * Routine: vm_object_coalesce 1297 * Function: Coalesces two objects backing up adjoining 1298 * regions of memory into a single object. 1299 * 1300 * returns TRUE if objects were combined. 1301 * 1302 * NOTE: Only works at the moment if the second object is NULL - 1303 * if it's not, which object do we lock first? 1304 * 1305 * Parameters: 1306 * prev_object First object to coalesce 1307 * prev_offset Offset into prev_object 1308 * next_object Second object into coalesce 1309 * next_offset Offset into next_object 1310 * 1311 * prev_size Size of reference to prev_object 1312 * next_size Size of reference to next_object 1313 * 1314 * Conditions: 1315 * The object must *not* be locked. 1316 */ 1317boolean_t 1318vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size) 1319 register vm_object_t prev_object; 1320 vm_pindex_t prev_pindex; 1321 vm_size_t prev_size, next_size; 1322{ 1323 vm_size_t newsize; 1324 1325 if (prev_object == NULL) { 1326 return (TRUE); 1327 } 1328 1329 if (prev_object->type != OBJT_DEFAULT) { 1330 return (FALSE); 1331 } 1332 1333 /* 1334 * Try to collapse the object first 1335 */ 1336 vm_object_collapse(prev_object); 1337 1338 /* 1339 * Can't coalesce if: . more than one reference . paged out . shadows 1340 * another object . has a copy elsewhere (any of which mean that the 1341 * pages not mapped to prev_entry may be in use anyway) 1342 */ 1343 1344 if (prev_object->backing_object != NULL) { 1345 return (FALSE); 1346 } 1347 1348 prev_size >>= PAGE_SHIFT; 1349 next_size >>= PAGE_SHIFT; 1350 1351 if ((prev_object->ref_count > 1) && 1352 (prev_object->size != prev_pindex + prev_size)) { 1353 return (FALSE); 1354 } 1355 1356 /* 1357 * Remove any pages that may still be in the object from a previous 1358 * deallocation. 
1359 */ 1360 1361 vm_object_page_remove(prev_object, 1362 prev_pindex + prev_size, 1363 prev_pindex + prev_size + next_size, FALSE); 1364 1365 /* 1366 * Extend the object if necessary. 1367 */ 1368 newsize = prev_pindex + prev_size + next_size; 1369 if (newsize > prev_object->size) 1370 prev_object->size = newsize; 1371 1372 return (TRUE); 1373} 1374 1375#include "opt_ddb.h" 1376#ifdef DDB 1377#include <sys/kernel.h> 1378 1379#include <machine/cons.h> 1380 1381#include <ddb/ddb.h> 1382 1383static int _vm_object_in_map __P((vm_map_t map, vm_object_t object, 1384 vm_map_entry_t entry)); 1385static int vm_object_in_map __P((vm_object_t object)); 1386 1387static int 1388_vm_object_in_map(map, object, entry) 1389 vm_map_t map; 1390 vm_object_t object; 1391 vm_map_entry_t entry; 1392{ 1393 vm_map_t tmpm; 1394 vm_map_entry_t tmpe; 1395 vm_object_t obj; 1396 int entcount; 1397 1398 if (map == 0) 1399 return 0; 1400 1401 if (entry == 0) { 1402 tmpe = map->header.next; 1403 entcount = map->nentries; 1404 while (entcount-- && (tmpe != &map->header)) { 1405 if( _vm_object_in_map(map, object, tmpe)) { 1406 return 1; 1407 } 1408 tmpe = tmpe->next; 1409 } 1410 } else if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) { 1411 tmpm = entry->object.share_map; 1412 tmpe = tmpm->header.next; 1413 entcount = tmpm->nentries; 1414 while (entcount-- && tmpe != &tmpm->header) { 1415 if( _vm_object_in_map(tmpm, object, tmpe)) { 1416 return 1; 1417 } 1418 tmpe = tmpe->next; 1419 } 1420 } else if (obj = entry->object.vm_object) { 1421 for(; obj; obj=obj->backing_object) 1422 if( obj == object) { 1423 return 1; 1424 } 1425 } 1426 return 0; 1427} 1428 1429static int 1430vm_object_in_map( object) 1431 vm_object_t object; 1432{ 1433 struct proc *p; 1434 for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) { 1435 if( !p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) 1436 continue; 1437 if( _vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) 1438 return 1; 1439 } 1440 if( 
_vm_object_in_map( kernel_map, object, 0)) 1441 return 1; 1442 if( _vm_object_in_map( kmem_map, object, 0)) 1443 return 1; 1444 if( _vm_object_in_map( pager_map, object, 0)) 1445 return 1; 1446 if( _vm_object_in_map( buffer_map, object, 0)) 1447 return 1; 1448 if( _vm_object_in_map( io_map, object, 0)) 1449 return 1; 1450 if( _vm_object_in_map( phys_map, object, 0)) 1451 return 1; 1452 if( _vm_object_in_map( mb_map, object, 0)) 1453 return 1; 1454 if( _vm_object_in_map( u_map, object, 0)) 1455 return 1; 1456 return 0; 1457} 1458 1459DB_SHOW_COMMAND(vmochk, vm_object_check) 1460{ 1461 vm_object_t object; 1462 1463 /* 1464 * make sure that internal objs are in a map somewhere 1465 * and none have zero ref counts. 1466 */ 1467 for (object = TAILQ_FIRST(&vm_object_list); 1468 object != NULL; 1469 object = TAILQ_NEXT(object, object_list)) { 1470 if (object->handle == NULL && 1471 (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { 1472 if (object->ref_count == 0) { 1473 db_printf("vmochk: internal obj has zero ref count: %d\n", 1474 object->size); 1475 } 1476 if (!vm_object_in_map(object)) { 1477 db_printf("vmochk: internal obj is not in a map: " 1478 "ref: %d, size: %d: 0x%x, backing_object: 0x%x\n", 1479 object->ref_count, object->size, 1480 object->size, object->backing_object); 1481 } 1482 } 1483 } 1484} 1485 1486/* 1487 * vm_object_print: [ debug ] 1488 */ 1489DB_SHOW_COMMAND(object, vm_object_print_static) 1490{ 1491 /* XXX convert args. */ 1492 vm_object_t object = (vm_object_t)addr; 1493 boolean_t full = have_addr; 1494 1495 register vm_page_t p; 1496 1497 /* XXX count is an (unused) arg. Avoid shadowing it. 
*/ 1498#define count was_count 1499 1500 register int count; 1501 1502 if (object == NULL) 1503 return; 1504 1505 db_iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ", 1506 (int) object, (int) object->size, 1507 object->resident_page_count, object->ref_count); 1508 db_printf("offset=0x%x, backing_object=(0x%x)+0x%x\n", 1509 (int) object->paging_offset, 1510 (int) object->backing_object, (int) object->backing_object_offset); 1511 db_printf("cache: next=%p, prev=%p\n", 1512 TAILQ_NEXT(object, cached_list), TAILQ_PREV(object, cached_list)); 1513 1514 if (!full) 1515 return; 1516 1517 db_indent += 2; 1518 count = 0; 1519 for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) { 1520 if (count == 0) 1521 db_iprintf("memory:="); 1522 else if (count == 6) { 1523 db_printf("\n"); 1524 db_iprintf(" ..."); 1525 count = 0; 1526 } else 1527 db_printf(","); 1528 count++; 1529 1530 db_printf("(off=0x%lx,page=0x%lx)", 1531 (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p)); 1532 } 1533 if (count != 0) 1534 db_printf("\n"); 1535 db_indent -= 2; 1536} 1537 1538/* XXX. */ 1539#undef count 1540 1541/* XXX need this non-static entry for calling from vm_map_print. 
*/ 1542void 1543vm_object_print(addr, have_addr, count, modif) 1544 db_expr_t addr; 1545 boolean_t have_addr; 1546 db_expr_t count; 1547 char *modif; 1548{ 1549 vm_object_print_static(addr, have_addr, count, modif); 1550} 1551 1552DB_SHOW_COMMAND(vmopag, vm_object_print_pages) 1553{ 1554 vm_object_t object; 1555 int nl = 0; 1556 int c; 1557 for (object = TAILQ_FIRST(&vm_object_list); 1558 object != NULL; 1559 object = TAILQ_NEXT(object, object_list)) { 1560 vm_pindex_t idx, fidx; 1561 vm_pindex_t osize; 1562 vm_offset_t pa = -1, padiff; 1563 int rcount; 1564 vm_page_t m; 1565 1566 db_printf("new object: 0x%x\n", object); 1567 if ( nl > 18) { 1568 c = cngetc(); 1569 if (c != ' ') 1570 return; 1571 nl = 0; 1572 } 1573 nl++; 1574 rcount = 0; 1575 fidx = 0; 1576 osize = object->size; 1577 if (osize > 128) 1578 osize = 128; 1579 for(idx=0;idx<osize;idx++) { 1580 m = vm_page_lookup(object, idx); 1581 if (m == NULL) { 1582 if (rcount) { 1583 db_printf(" index(%d)run(%d)pa(0x%x)\n", 1584 fidx, rcount, pa); 1585 if ( nl > 18) { 1586 c = cngetc(); 1587 if (c != ' ') 1588 return; 1589 nl = 0; 1590 } 1591 nl++; 1592 rcount = 0; 1593 } 1594 continue; 1595 } 1596 1597 1598 if (rcount && 1599 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { 1600 ++rcount; 1601 continue; 1602 } 1603 if (rcount) { 1604 padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m); 1605 padiff >>= PAGE_SHIFT; 1606 padiff &= PQ_L2_MASK; 1607 if (padiff == 0) { 1608 pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE; 1609 ++rcount; 1610 continue; 1611 } 1612 db_printf(" index(%d)run(%d)pa(0x%x)", fidx, rcount, pa); 1613 db_printf("pd(%d)\n", padiff); 1614 if ( nl > 18) { 1615 c = cngetc(); 1616 if (c != ' ') 1617 return; 1618 nl = 0; 1619 } 1620 nl++; 1621 } 1622 fidx = idx; 1623 pa = VM_PAGE_TO_PHYS(m); 1624 rcount = 1; 1625 } 1626 if (rcount) { 1627 db_printf(" index(%d)run(%d)pa(0x%x)\n", fidx, rcount, pa); 1628 if ( nl > 18) { 1629 c = cngetc(); 1630 if (c != ' ') 1631 return; 1632 nl = 0; 1633 } 1634 nl++; 
1635 } 1636 } 1637} 1638#endif /* DDB */ 1639