vm_object.c revision 32071
1/* 2 * Copyright (c) 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * The Mach Operating System project at Carnegie-Mellon University. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * from: @(#)vm_object.c 8.5 (Berkeley) 3/22/94 37 * 38 * 39 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40 * All rights reserved. 41 * 42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 43 * 44 * Permission to use, copy, modify and distribute this software and 45 * its documentation is hereby granted, provided that both the copyright 46 * notice and this permission notice appear in all copies of the 47 * software, derivative works or modified versions, and any portions 48 * thereof, and that both notices appear in supporting documentation. 49 * 50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 53 * 54 * Carnegie Mellon requests users of this software to return to 55 * 56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57 * School of Computer Science 58 * Carnegie Mellon University 59 * Pittsburgh PA 15213-3890 60 * 61 * any improvements or extensions that they make and grant Carnegie the 62 * rights to redistribute these changes. 63 * 64 * $Id: vm_object.c,v 1.102 1997/12/19 09:03:14 dyson Exp $ 65 */ 66 67/* 68 * Virtual memory object module. 
69 */ 70 71#include <sys/param.h> 72#include <sys/systm.h> 73#include <sys/proc.h> /* for curproc, pageproc */ 74#include <sys/vnode.h> 75#include <sys/vmmeter.h> 76#include <sys/mman.h> 77 78#include <vm/vm.h> 79#include <vm/vm_param.h> 80#include <vm/vm_prot.h> 81#include <sys/lock.h> 82#include <vm/pmap.h> 83#include <vm/vm_map.h> 84#include <vm/vm_object.h> 85#include <vm/vm_page.h> 86#include <vm/vm_pageout.h> 87#include <vm/vm_pager.h> 88#include <vm/swap_pager.h> 89#include <vm/vm_kern.h> 90#include <vm/vm_extern.h> 91#include <vm/vm_zone.h> 92 93static void vm_object_qcollapse __P((vm_object_t object)); 94#ifdef not_used 95static void vm_object_deactivate_pages __P((vm_object_t)); 96#endif 97static void vm_object_terminate __P((vm_object_t)); 98 99/* 100 * Virtual memory objects maintain the actual data 101 * associated with allocated virtual memory. A given 102 * page of memory exists within exactly one object. 103 * 104 * An object is only deallocated when all "references" 105 * are given up. Only one "reference" to a given 106 * region of an object should be writeable. 107 * 108 * Associated with each object is a list of all resident 109 * memory pages belonging to that object; this list is 110 * maintained by the "vm_page" module, and locked by the object's 111 * lock. 112 * 113 * Each object also records a "pager" routine which is 114 * used to retrieve (and store) pages to the proper backing 115 * storage. In addition, objects may be backed by other 116 * objects from which they were virtual-copied. 
 *
 * The only items within the object structure which are
 * modified after time of creation are:
 *	reference count		locked by object's lock
 *	pager routine		locked by object's lock
 *
 */

/* Global list of all VM objects, and the simplelock protecting it. */
struct object_q vm_object_list;
struct simplelock vm_object_list_lock;
static long vm_object_count;		/* count of all objects */
vm_object_t kernel_object;
vm_object_t kmem_object;
static struct vm_object kernel_object_store;
static struct vm_object kmem_object_store;
extern int vm_pageout_page_count;

static long object_collapses;		/* statistic: full collapses performed */
static long object_bypasses;		/* statistic: backing-object bypasses */
static int next_index;			/* next page-color index to hand out */
static vm_zone_t obj_zone;		/* zone backing vm_object allocations */
static struct vm_zone obj_zone_store;
#define VM_OBJECTS_INIT 256
/* Static bootstrap pool used before the zone allocator is fully up. */
struct vm_object vm_objects_init[VM_OBJECTS_INIT];

/*
 * _vm_object_allocate:
 *
 *	Initialize a caller-supplied vm_object of the given pager type and
 *	size (in pages), give it a single reference, and link it onto the
 *	global object list.  The page color (pg_color) is striped across
 *	the PQ_L2 range via next_index so successive objects spread their
 *	pages over the L2 cache.
 */
void
_vm_object_allocate(type, size, object)
	objtype_t type;
	vm_size_t size;
	register vm_object_t object;
{
	int incr;

	TAILQ_INIT(&object->memq);
	TAILQ_INIT(&object->shadow_head);

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->flags = 0;
	object->behavior = OBJ_NORMAL;
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->pg_color = next_index;
	/* Advance the color index; cap the stride for very large objects. */
	if ( size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	next_index = (next_index + incr) & PQ_L2_MASK;
	object->handle = NULL;
	object->paging_offset = (vm_ooffset_t) 0;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
	object->page_hint = NULL;

	object->last_read = 0;

	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init()
{
	TAILQ_INIT(&vm_object_list);
	simple_lock_init(&vm_object_list_lock);
	vm_object_count = 0;

	/* The kernel and kmem objects span the whole kernel virtual range. */
	kernel_object = &kernel_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);

	/* Bootstrap the object zone from the static vm_objects_init[] pool. */
	obj_zone = &obj_zone_store;
	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
	    vm_objects_init, VM_OBJECTS_INIT);
}

/*
 * Second-stage initialization of the object zone, run once zalloc can
 * be backed by real kernel virtual memory.
 */
void
vm_object_init2() {
	zinitna(obj_zone, NULL, NULL, 0, 0, 0, 1);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

vm_object_t
vm_object_allocate(type, size)
	objtype_t type;
	vm_size_t size;
{
	register vm_object_t result;

	result = (vm_object_t) zalloc(obj_zone);

	_vm_object_allocate(type, size, result);

	return (result);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void
vm_object_reference(object)
	register vm_object_t object;
{
	if (object == NULL)
		return;

	if (object->ref_count == 0) {
		panic("vm_object_reference: attempting to reference deallocated obj");
	}
	object->ref_count++;
	/*
	 * For vnode-backed objects that hold a VFS reference, also bump the
	 * vnode's use count so the vnode cannot be recycled underneath us.
	 */
	if ((object->type == OBJT_VNODE) && (object->flags & OBJ_VFS_REF)) {
		struct vnode *vp;
		vp = (struct vnode *)object->handle;
		simple_lock(&vp->v_interlock);
		/*
		 * NOTE(review): this test-and-set is a no-op (it re-sets a
		 * bit that must already be set for the branch to be taken).
		 * Possibly the intent was to set VOBJREF unconditionally --
		 * confirm against the revision history before changing.
		 */
		if (vp->v_flag & VOBJREF)
			vp->v_flag |= VOBJREF;
		++vp->v_usecount;
		simple_unlock(&vp->v_interlock);
	}
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	vm_object_t object;
{
	vm_object_t temp;
	struct vnode *vp;

	/* Iterative, not recursive: dropping the last reference on an object
	 * may in turn drop the reference it held on its backing object. */
	while (object != NULL) {

		if (object->ref_count == 0) {
			panic("vm_object_deallocate: object deallocated too many times");
		} else if (object->ref_count > 2) {
			/* Common case: simply drop one of many references. */
			object->ref_count--;
			return;
		}

		/*
		 * Here on ref_count of one or two, which are special cases for
		 * objects.
		 */
		vp = NULL;
		if (object->type == OBJT_VNODE) {
			vp = (struct vnode *)object->handle;
			if (vp->v_flag & VOBJREF) {
				if (object->ref_count < 2) {
					panic("vm_object_deallocate: "
					    "not enough references for OBJT_VNODE: %d",
					    object->ref_count);
				} else {

					/*
					 * Freeze optimized copies.
					 */
					vm_freeze_copyopts(object, 0, object->size);

					/*
					 * Loose our reference to the vnode.
					 */
					vp->v_flag &= ~VOBJREF;
					vrele(vp);
				}
			}
		}

		/*
		 * Lose the reference
		 */
		if (object->ref_count == 2) {
			object->ref_count--;
			/*
			 * If the only remaining user is a single anonymous
			 * shadow object, try to collapse the shadow into this
			 * object rather than leaving a chain behind.
			 */
			if ((object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;
				robject = TAILQ_FIRST(&object->shadow_head);
				if ((robject != NULL) &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {
					int s;
					/*
					 * Take transient references on both
					 * objects so neither can be destroyed
					 * while we sleep waiting for pending
					 * paging activity to drain.
					 */
					robject->ref_count += 2;
					object->ref_count += 2;

					do {
						s = splvm();
						while (robject->paging_in_progress) {
							robject->flags |= OBJ_PIPWNT;
							tsleep(robject, PVM, "objde1", 0);
						}

						while (object->paging_in_progress) {
							object->flags |= OBJ_PIPWNT;
							tsleep(object, PVM, "objde2", 0);
						}
						splx(s);

					} while( object->paging_in_progress || robject->paging_in_progress);

					object->ref_count -= 2;
					robject->ref_count -= 2;
					if( robject->ref_count == 0) {
						/* Shadow died while we slept;
						 * deallocate it on the next
						 * loop iteration. */
						robject->ref_count += 1;
						object = robject;
						continue;
					}

					vm_object_collapse(robject);
					return;
				}
			}
			/*
			 * If there are still references, then we are done.
			 */
			return;
		}

		/*
		 * Make sure no one uses us.
		 */
		object->flags |= OBJ_DEAD;

		if (vp)
			vp->v_flag &= ~VTEXT;

		object->ref_count--;

		/* Unlink from our backing object's shadow list before dying. */
		temp = object->backing_object;
		if (temp) {
			TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
			--temp->shadow_count;
		}
		vm_object_terminate(object);
		/* unlocks and deallocates object */
		object = temp;
	}
}

/*
 * vm_object_terminate actually destroys the specified object, freeing
 * up all previously used resources.
 *
 * The object must be locked.
 */
static void
vm_object_terminate(object)
	register vm_object_t object;
{
	register vm_page_t p;
	int s;

	if (object->flags & OBJ_VFS_REF)
		panic("vm_object_deallocate: freeing VFS_REF'ed object");

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	s = splvm();
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		tsleep(object, PVM, "objtrm", 0);
	}
	splx(s);

	/* NOTE(review): the panic text says "deallocate" but this is
	 * vm_object_terminate -- message predates a rename, apparently. */
	if (object->paging_in_progress != 0)
		panic("vm_object_deallocate: pageout in progress");

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = object->handle;
		vm_object_page_clean(object, 0, 0, TRUE);
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	}

	/*
	 * Now free the pages. For internal objects, this also removes them
	 * from paging queues.
	 */
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		if (p->busy || (p->flags & PG_BUSY))
			printf("vm_object_terminate: freeing busy page\n");
		PAGE_WAKEUP(p);
		vm_page_free(p);
		cnt.v_pfree++;
	}

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	/* Unlink from the global object list. */
	simple_lock(&vm_object_list_lock);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	zfree(obj_zone, object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */

void
vm_object_page_clean(object, start, end, syncio)
	vm_object_t object;
	vm_pindex_t start;
	vm_pindex_t end;
	boolean_t syncio;
{
	register vm_page_t p, np, tp;
	register vm_offset_t tstart, tend;
	vm_pindex_t pi;
	int s;
	struct vnode *vp;
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	/* Cluster arrays: pages after the anchor (maf), before it (mab),
	 * and the assembled run handed to the pageout code (ma). */
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];
	struct proc *pproc = curproc;	/* XXX */

	/* Only vnode objects that might be dirty need any work. */
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	vp = object->handle;

	object->flags |= OBJ_CLEANING;

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}
	/* Cleaning the whole object clears the dirtiness hints. */
	if ((tstart == 0) && (tend == object->size)) {
		object->flags &= ~(OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	}
	/* Mark every resident page as a cleaning candidate. */
	for(p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq))
		p->flags |= PG_CLEANCHK;

rescan:
	for(p = TAILQ_FIRST(&object->memq); p; p = np) {
		np = TAILQ_NEXT(p, listq);

		pi = p->pindex;
		if (((p->flags & PG_CLEANCHK) == 0) ||
		    (pi < tstart) || (pi >= tend) ||
		    (p->valid == 0) ||
		    ((p->queue - p->pc) == PQ_CACHE)) {
			p->flags &= ~PG_CLEANCHK;
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			p->flags &= ~PG_CLEANCHK;
			continue;
		}

		/* Wait for a busy page, then rescan from the beginning. */
		s = splvm();
		if ((p->flags & PG_BUSY) || p->busy) {
			p->flags |= PG_WANTED|PG_REFERENCED;
			tsleep(p, PVM, "vpcwai", 0);
			splx(s);
			goto rescan;
		}
		splx(s);

		/*
		 * Gather a run of contiguous dirty pages forward of the
		 * anchor page ...
		 */
		s = splvm();
		maxf = 0;
		for(i=1;i<vm_pageout_page_count;i++) {
			if (tp = vm_page_lookup(object, pi + i)) {
				if ((tp->flags & PG_BUSY) ||
				    (tp->flags & PG_CLEANCHK) == 0)
					break;
				if((tp->queue - tp->pc) == PQ_CACHE) {
					tp->flags &= ~PG_CLEANCHK;
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					tp->flags &= ~PG_CLEANCHK;
					break;
				}
				maf[ i - 1 ] = tp;
				maxf++;
				continue;
			}
			break;
		}

		/*
		 * ... and backward of it, up to the remaining cluster budget.
		 */
		maxb = 0;
		chkb = vm_pageout_page_count -  maxf;
		if (chkb) {
			for(i = 1; i < chkb;i++) {
				if (tp = vm_page_lookup(object, pi - i)) {
					if ((tp->flags & PG_BUSY) ||
					    (tp->flags & PG_CLEANCHK) == 0)
						break;
					if((tp->queue - tp->pc) == PQ_CACHE) {
						tp->flags &= ~PG_CLEANCHK;
						break;
					}
					vm_page_test_dirty(tp);
					if ((tp->dirty & tp->valid) == 0) {
						tp->flags &= ~PG_CLEANCHK;
						break;
					}
					mab[ i - 1 ] = tp;
					maxb++;
					continue;
				}
				break;
			}
		}

		/*
		 * Assemble the run in index order: backward pages (reversed),
		 * the anchor, then forward pages.  Busy each page and revoke
		 * write access before handing the cluster to the pageout code.
		 */
		for(i=0;i<maxb;i++) {
			int index = (maxb - i) - 1;
			ma[index] = mab[i];
			ma[index]->flags |= PG_BUSY;
			ma[index]->flags &= ~PG_CLEANCHK;
			vm_page_protect(ma[index], VM_PROT_READ);
		}
		vm_page_protect(p, VM_PROT_READ);
		p->flags |= PG_BUSY;
		p->flags &= ~PG_CLEANCHK;
		ma[maxb] = p;
		for(i=0;i<maxf;i++) {
			int index = (maxb + i) + 1;
			ma[index] = maf[i];
			ma[index]->flags |= PG_BUSY;
			ma[index]->flags &= ~PG_CLEANCHK;
			vm_page_protect(ma[index], VM_PROT_READ);
		}
		runlen = maxb + maxf + 1;
		splx(s);
		vm_pageout_flush(ma, runlen, 0);
		goto rescan;
	}

	VOP_FSYNC(vp, NULL, syncio, curproc);

	object->flags &= ~OBJ_CLEANING;
	return;
}

#ifdef not_used
/* XXX I cannot
tell if this should be an exported symbol */ 610/* 611 * vm_object_deactivate_pages 612 * 613 * Deactivate all pages in the specified object. (Keep its pages 614 * in memory even though it is no longer referenced.) 615 * 616 * The object must be locked. 617 */ 618static void 619vm_object_deactivate_pages(object) 620 register vm_object_t object; 621{ 622 register vm_page_t p, next; 623 624 for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) { 625 next = TAILQ_NEXT(p, listq); 626 vm_page_deactivate(p); 627 } 628} 629#endif 630 631/* 632 * vm_object_pmap_copy: 633 * 634 * Makes all physical pages in the specified 635 * object range copy-on-write. No writeable 636 * references to these pages should remain. 637 * 638 * The object must *not* be locked. 639 */ 640void 641vm_object_pmap_copy(object, start, end) 642 register vm_object_t object; 643 register vm_pindex_t start; 644 register vm_pindex_t end; 645{ 646 register vm_page_t p; 647 648 if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0) 649 return; 650 651 for (p = TAILQ_FIRST(&object->memq); 652 p != NULL; 653 p = TAILQ_NEXT(p, listq)) { 654 vm_page_protect(p, VM_PROT_READ); 655 } 656 657 object->flags &= ~OBJ_WRITEABLE; 658} 659 660/* 661 * Same as vm_object_pmap_copy_1, except range checking really 662 * works, and is meant for small sections of an object. 663 */ 664void 665vm_object_pmap_copy_1(object, start, end) 666 register vm_object_t object; 667 register vm_pindex_t start; 668 register vm_pindex_t end; 669{ 670 vm_pindex_t idx; 671 register vm_page_t p; 672 673 if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0) 674 return; 675 676 for (idx = start; idx < end; idx++) { 677 p = vm_page_lookup(object, idx); 678 if (p == NULL) 679 continue; 680 vm_page_protect(p, VM_PROT_READ); 681 } 682} 683 684/* 685 * vm_object_pmap_remove: 686 * 687 * Removes all physical pages in the specified 688 * object range from all physical maps. 689 * 690 * The object must *not* be locked. 
691 */ 692void 693vm_object_pmap_remove(object, start, end) 694 register vm_object_t object; 695 register vm_pindex_t start; 696 register vm_pindex_t end; 697{ 698 register vm_page_t p; 699 if (object == NULL) 700 return; 701 for (p = TAILQ_FIRST(&object->memq); 702 p != NULL; 703 p = TAILQ_NEXT(p, listq)) { 704 if (p->pindex >= start && p->pindex < end) 705 vm_page_protect(p, VM_PROT_NONE); 706 } 707 if ((start == 0) && (object->size == end)) 708 object->flags &= ~OBJ_WRITEABLE; 709} 710 711/* 712 * vm_object_madvise: 713 * 714 * Implements the madvise function at the object/page level. 715 */ 716void 717vm_object_madvise(object, pindex, count, advise) 718 vm_object_t object; 719 vm_pindex_t pindex; 720 int count; 721 int advise; 722{ 723 int s; 724 vm_pindex_t end, tpindex; 725 vm_object_t tobject; 726 vm_page_t m; 727 728 if (object == NULL) 729 return; 730 731 end = pindex + count; 732 733 for (; pindex < end; pindex += 1) { 734 735relookup: 736 tobject = object; 737 tpindex = pindex; 738shadowlookup: 739 m = vm_page_lookup(tobject, tpindex); 740 if (m == NULL) { 741 if (tobject->type != OBJT_DEFAULT) { 742 continue; 743 } 744 745 tobject = tobject->backing_object; 746 if ((tobject == NULL) || (tobject->ref_count != 1)) { 747 continue; 748 } 749 tpindex += OFF_TO_IDX(tobject->backing_object_offset); 750 goto shadowlookup; 751 } 752 753 /* 754 * If the page is busy or not in a normal active state, 755 * we skip it. Things can break if we mess with pages 756 * in any of the below states. 
757 */ 758 if (m->hold_count || m->wire_count || 759 m->valid != VM_PAGE_BITS_ALL) { 760 continue; 761 } 762 763 if (m->busy || (m->flags & PG_BUSY)) { 764 s = splvm(); 765 if (m->busy || (m->flags & PG_BUSY)) { 766 m->flags |= PG_WANTED; 767 tsleep(m, PVM, "madvpw", 0); 768 } 769 splx(s); 770 goto relookup; 771 } 772 773 if (advise == MADV_WILLNEED) { 774 if (m->queue != PQ_ACTIVE) 775 vm_page_activate(m); 776 } else if (advise == MADV_DONTNEED) { 777 vm_page_deactivate(m); 778 } else if (advise == MADV_FREE) { 779 pmap_clear_modify(VM_PAGE_TO_PHYS(m)); 780 m->dirty = 0; 781 /* 782 * Force a demand zero if attempt to read from swap. 783 * We currently don't handle vnode files correctly, 784 * and will reread stale contents unnecessarily. 785 */ 786 if (object->type == OBJT_SWAP) 787 swap_pager_dmzspace(tobject, m->pindex, 1); 788 } 789 } 790} 791 792/* 793 * vm_object_shadow: 794 * 795 * Create a new object which is backed by the 796 * specified existing object range. The source 797 * object reference is deallocated. 798 * 799 * The new object and offset into that object 800 * are returned in the source parameters. 801 */ 802 803void 804vm_object_shadow(object, offset, length) 805 vm_object_t *object; /* IN/OUT */ 806 vm_ooffset_t *offset; /* IN/OUT */ 807 vm_size_t length; 808{ 809 register vm_object_t source; 810 register vm_object_t result; 811 812 source = *object; 813 814 /* 815 * Allocate a new object with the given length 816 */ 817 818 if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL) 819 panic("vm_object_shadow: no object for shadowing"); 820 821 /* 822 * The new object shadows the source object, adding a reference to it. 823 * Our caller changes his reference to point to the new object, 824 * removing a reference to the source object. Net result: no change 825 * of reference count. 
	 */
	result->backing_object = source;
	if (source) {
		TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list);
		++source->shadow_count;
	}

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */

	result->backing_object_offset = *offset;

	/*
	 * Return the new things
	 */

	*offset = 0;
	*object = result;
}


/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(object)
	register vm_object_t object;
{
	register vm_object_t backing_object;
	register vm_pindex_t backing_offset_index, paging_offset_index;
	vm_pindex_t backing_object_paging_offset_index;
	vm_pindex_t new_pindex;
	register vm_page_t p, pp;
	register vm_size_t size;

	/* Only an exclusively-referenced backing object may be drained. */
	backing_object = object->backing_object;
	if (backing_object->ref_count != 1)
		return;

	/* Transient references keep the backing object alive while we work. */
	backing_object->ref_count += 2;

	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
	backing_object_paging_offset_index = OFF_TO_IDX(backing_object->paging_offset);
	paging_offset_index = OFF_TO_IDX(object->paging_offset);
	size = object->size;
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next;

		next = TAILQ_NEXT(p, listq);
		/* Skip pages that cannot be safely moved or freed right now. */
		if ((p->flags & (PG_BUSY | PG_FICTITIOUS)) ||
		    ((p->queue - p->pc) == PQ_CACHE) ||
		    !p->valid || p->hold_count || p->wire_count || p->busy) {
			p = next;
			continue;
		}
		new_pindex = p->pindex - backing_offset_index;
		if (p->pindex < backing_offset_index ||
		    new_pindex >= size) {
			/* Page lies outside the parent's view; discard it
			 * along with any swap space backing it. */
			if (backing_object->type == OBJT_SWAP)
				swap_pager_freespace(backing_object,
				    backing_object_paging_offset_index+p->pindex,
				    1);
			vm_page_protect(p, VM_PROT_NONE);
			vm_page_free(p);
		} else {
			pp = vm_page_lookup(object, new_pindex);
			if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
			    paging_offset_index + new_pindex, NULL, NULL))) {
				/* Parent already covers this index; the
				 * backing copy is obsolete -- drop it. */
				if (backing_object->type == OBJT_SWAP)
					swap_pager_freespace(backing_object,
					    backing_object_paging_offset_index + p->pindex, 1);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			} else {
				/* Move the page up into the parent object;
				 * mark it fully dirty since its swap copy
				 * (if any) is being discarded. */
				if (backing_object->type == OBJT_SWAP)
					swap_pager_freespace(backing_object,
					    backing_object_paging_offset_index + p->pindex, 1);
				vm_page_rename(p, object, new_pindex);
				vm_page_protect(p, VM_PROT_NONE);
				p->dirty = VM_PAGE_BITS_ALL;
			}
		}
		p = next;
	}
	backing_object->ref_count -= 2;
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(object)
	vm_object_t object;

{
	vm_object_t backing_object;
	vm_ooffset_t backing_offset;
	vm_size_t size;
	vm_pindex_t new_pindex, backing_offset_index;
	vm_page_t p, pp;

	while (TRUE) {
		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and no pages in it are currently being paged
		 * out.
		 */
		if (object == NULL)
			return;

		/*
		 * Make sure there is a backing object.
		 */
		if ((backing_object = object->backing_object) == NULL)
			return;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsable.
		 */
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			return;
		}

		if (object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0) {
			/* Paging is active: fall back to the best-effort
			 * partial collapse and stop. */
			vm_object_qcollapse(object);
			return;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) remove
		 * the parent's reference to it.
		 */

		backing_offset = object->backing_object_offset;
		backing_offset_index = OFF_TO_IDX(backing_offset);
		size = object->size;

		/*
		 * If there is exactly one reference to the backing object, we
		 * can collapse it into the parent.
		 */

		if (backing_object->ref_count == 1) {

			backing_object->flags |= OBJ_DEAD;
			/*
			 * We can collapse the backing object.
			 *
			 * Move all in-memory pages from backing_object to the
			 * parent. Pages that have been paged out will be
			 * overwritten by any of the parent's pages that
			 * shadow them.
			 */

			while ((p = TAILQ_FIRST(&backing_object->memq)) != 0) {

				new_pindex = p->pindex - backing_offset_index;

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, dispose of
				 * it.
				 *
				 * Otherwise, move it as planned.
				 */

				if (p->pindex < backing_offset_index ||
				    new_pindex >= size) {
					vm_page_protect(p, VM_PROT_NONE);
					PAGE_WAKEUP(p);
					vm_page_free(p);
				} else {
					pp = vm_page_lookup(object, new_pindex);
					if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
					    OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL))) {
						vm_page_protect(p, VM_PROT_NONE);
						PAGE_WAKEUP(p);
						vm_page_free(p);
					} else {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_rename(p, object, new_pindex);
						p->dirty = VM_PAGE_BITS_ALL;
					}
				}
			}

			/*
			 * Move the pager from backing_object to object.
			 */

			if (backing_object->type == OBJT_SWAP) {
				backing_object->paging_in_progress++;
				if (object->type == OBJT_SWAP) {
					object->paging_in_progress++;
					/*
					 * copy shadow object pages into ours
					 * and destroy unneeded pages in
					 * shadow object.
					 */
					swap_pager_copy(
					    backing_object,
					    OFF_TO_IDX(backing_object->paging_offset),
					    object,
					    OFF_TO_IDX(object->paging_offset),
					    OFF_TO_IDX(object->backing_object_offset));
					vm_object_pip_wakeup(object);
				} else {
					object->paging_in_progress++;
					/*
					 * move the shadow backing_object's pager data to
					 * "object" and convert "object" type to OBJT_SWAP.
					 */
					object->type = OBJT_SWAP;
					object->un_pager.swp.swp_nblocks =
					    backing_object->un_pager.swp.swp_nblocks;
					object->un_pager.swp.swp_allocsize =
					    backing_object->un_pager.swp.swp_allocsize;
					object->un_pager.swp.swp_blocks =
					    backing_object->un_pager.swp.swp_blocks;
					object->un_pager.swp.swp_poip =		/* XXX */
					    backing_object->un_pager.swp.swp_poip;
					object->paging_offset = backing_object->paging_offset + backing_offset;
					TAILQ_INSERT_TAIL(&swap_pager_un_object_list, object, pager_object_list);

					/*
					 * Convert backing object from OBJT_SWAP to
					 * OBJT_DEFAULT. XXX - only the TAILQ_REMOVE is
					 * actually necessary.
					 */
					backing_object->type = OBJT_DEFAULT;
					TAILQ_REMOVE(&swap_pager_un_object_list, backing_object, pager_object_list);
					/*
					 * free unnecessary blocks
					 */
					swap_pager_freespace(object, 0,
					    OFF_TO_IDX(object->paging_offset));
					vm_object_pip_wakeup(object);
				}

				vm_object_pip_wakeup(backing_object);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to backing_object->backing_object
			 * moves from within backing_object to within object.
			 */

			TAILQ_REMOVE(&object->backing_object->shadow_head, object,
			    shadow_list);
			--object->backing_object->shadow_count;
			if (backing_object->backing_object) {
				TAILQ_REMOVE(&backing_object->backing_object->shadow_head,
				    backing_object, shadow_list);
				--backing_object->backing_object->shadow_count;
			}
			object->backing_object = backing_object->backing_object;
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
				    object, shadow_list);
				++object->backing_object->shadow_count;
			}

			object->backing_object_offset += backing_object->backing_object_offset;
			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */

			TAILQ_REMOVE(&vm_object_list, backing_object,
			    object_list);
			vm_object_count--;

			zfree(obj_zone, backing_object);

			object_collapses++;
		} else {
			/*
			 * If all of the pages in the backing object are
			 * shadowed by the parent object, the parent object no
			 * longer has to shadow the backing object; it can
			 * shadow the next one in the chain.
			 *
			 * The backing object must not be paged out - we'd have
			 * to check all of the paged-out pages, as well.
			 */

			if (backing_object->type != OBJT_DEFAULT) {
				return;
			}
			/*
			 * Should have a check for a 'small' number of pages
			 * here.
			 */

			for (p = TAILQ_FIRST(&backing_object->memq); p; p = TAILQ_NEXT(p, listq)) {
				new_pindex = p->pindex - backing_offset_index;

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, keep going.
				 *
				 * Otherwise, the backing_object must be left in
				 * the chain.
				 */

				/*
				 * NOTE(review): "<= size" admits the index one
				 * past the end, unlike the ">= size" bound used
				 * by the collapse path above -- looks like an
				 * off-by-one; confirm before changing.
				 */
				if (p->pindex >= backing_offset_index &&
				    new_pindex <= size) {

					pp = vm_page_lookup(object, new_pindex);

					if ((pp == NULL || pp->valid == 0) &&
					    !vm_pager_has_page(object, OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL)) {
						/*
						 * Page still needed. Can't go any
						 * further.
						 */
						return;
					}
				}
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */

			TAILQ_REMOVE(&object->backing_object->shadow_head,
			    object, shadow_list);
			--object->backing_object->shadow_count;
			vm_object_reference(object->backing_object = backing_object->backing_object);
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
				    object, shadow_list);
				++object->backing_object->shadow_count;
			}
			object->backing_object_offset += backing_object->backing_object_offset;

			/*
			 * Drop the reference count on backing_object. Since
			 * its ref_count was at least 2, it will not vanish;
			 * so we don't need to call vm_object_deallocate.
			 */
			if (backing_object->ref_count == 1)
				printf("should have called obj deallocate\n");
			backing_object->ref_count--;

			object_bypasses++;

		}

		/*
		 * Try again with this object's new backing object.
1202 */ 1203 } 1204} 1205 1206/* 1207 * vm_object_page_remove: [internal] 1208 * 1209 * Removes all physical pages in the specified 1210 * object range from the object's list of pages. 1211 * 1212 * The object must be locked. 1213 */ 1214void 1215vm_object_page_remove(object, start, end, clean_only) 1216 register vm_object_t object; 1217 register vm_pindex_t start; 1218 register vm_pindex_t end; 1219 boolean_t clean_only; 1220{ 1221 register vm_page_t p, next; 1222 unsigned int size; 1223 int s; 1224 1225 if (object == NULL) 1226 return; 1227 1228 object->paging_in_progress++; 1229again: 1230 size = end - start; 1231 if (size > 4 || size >= object->size / 4) { 1232 for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) { 1233 next = TAILQ_NEXT(p, listq); 1234 if ((start <= p->pindex) && (p->pindex < end)) { 1235 if (p->wire_count != 0) { 1236 vm_page_protect(p, VM_PROT_NONE); 1237 p->valid = 0; 1238 continue; 1239 } 1240 1241 /* 1242 * The busy flags are only cleared at 1243 * interrupt -- minimize the spl transitions 1244 */ 1245 if ((p->flags & PG_BUSY) || p->busy) { 1246 s = splvm(); 1247 if ((p->flags & PG_BUSY) || p->busy) { 1248 p->flags |= PG_WANTED; 1249 tsleep(p, PVM, "vmopar", 0); 1250 splx(s); 1251 goto again; 1252 } 1253 splx(s); 1254 } 1255 1256 if (clean_only) { 1257 vm_page_test_dirty(p); 1258 if (p->valid & p->dirty) 1259 continue; 1260 } 1261 vm_page_protect(p, VM_PROT_NONE); 1262 PAGE_WAKEUP(p); 1263 vm_page_free(p); 1264 } 1265 } 1266 } else { 1267 while (size > 0) { 1268 if ((p = vm_page_lookup(object, start)) != 0) { 1269 if (p->wire_count != 0) { 1270 p->valid = 0; 1271 vm_page_protect(p, VM_PROT_NONE); 1272 start += 1; 1273 size -= 1; 1274 continue; 1275 } 1276 /* 1277 * The busy flags are only cleared at 1278 * interrupt -- minimize the spl transitions 1279 */ 1280 if ((p->flags & PG_BUSY) || p->busy) { 1281 s = splvm(); 1282 if ((p->flags & PG_BUSY) || p->busy) { 1283 p->flags |= PG_WANTED; 1284 tsleep(p, PVM, "vmopar", 0); 1285 splx(s); 
1286 goto again; 1287 } 1288 splx(s); 1289 } 1290 if (clean_only) { 1291 vm_page_test_dirty(p); 1292 if (p->valid & p->dirty) { 1293 start += 1; 1294 size -= 1; 1295 continue; 1296 } 1297 } 1298 vm_page_protect(p, VM_PROT_NONE); 1299 PAGE_WAKEUP(p); 1300 vm_page_free(p); 1301 } 1302 start += 1; 1303 size -= 1; 1304 } 1305 } 1306 vm_object_pip_wakeup(object); 1307} 1308 1309/* 1310 * Routine: vm_object_coalesce 1311 * Function: Coalesces two objects backing up adjoining 1312 * regions of memory into a single object. 1313 * 1314 * returns TRUE if objects were combined. 1315 * 1316 * NOTE: Only works at the moment if the second object is NULL - 1317 * if it's not, which object do we lock first? 1318 * 1319 * Parameters: 1320 * prev_object First object to coalesce 1321 * prev_offset Offset into prev_object 1322 * next_object Second object into coalesce 1323 * next_offset Offset into next_object 1324 * 1325 * prev_size Size of reference to prev_object 1326 * next_size Size of reference to next_object 1327 * 1328 * Conditions: 1329 * The object must *not* be locked. 1330 */ 1331boolean_t 1332vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size) 1333 register vm_object_t prev_object; 1334 vm_pindex_t prev_pindex; 1335 vm_size_t prev_size, next_size; 1336{ 1337 vm_size_t newsize; 1338 1339 if (prev_object == NULL) { 1340 return (TRUE); 1341 } 1342 1343 if (prev_object->type != OBJT_DEFAULT) { 1344 return (FALSE); 1345 } 1346 1347 /* 1348 * Try to collapse the object first 1349 */ 1350 vm_object_collapse(prev_object); 1351 1352 /* 1353 * Can't coalesce if: . more than one reference . paged out . shadows 1354 * another object . 
has a copy elsewhere (any of which mean that the
	 * pages not mapped to prev_entry may be in use anyway)
	 */

	if (prev_object->backing_object != NULL) {
		return (FALSE);
	}

	/* Convert byte sizes to page counts. */
	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;

	/*
	 * With more than one reference the object may only be grown in
	 * place when the new region abuts the object's current end.
	 */
	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != prev_pindex + prev_size)) {
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */

	vm_object_page_remove(prev_object,
	    prev_pindex + prev_size,
	    prev_pindex + prev_size + next_size, FALSE);

	/*
	 * Extend the object if necessary.
	 */
	newsize = prev_pindex + prev_size + next_size;
	if (newsize > prev_object->size)
		prev_object->size = newsize;

	return (TRUE);
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <machine/cons.h>

#include <ddb/ddb.h>

static int	_vm_object_in_map __P((vm_map_t map, vm_object_t object,
				       vm_map_entry_t entry));
static int	vm_object_in_map __P((vm_object_t object));

/*
 * Search one VM map (or, recursively, one map entry) for a reference
 * to the given object anywhere in an entry's backing-object chain.
 * A NULL entry means "scan every entry of map".  Returns 1 if found,
 * 0 otherwise.  Debug-only helper for the DDB vmochk command.
 */
static int
_vm_object_in_map(map, object, entry)
	vm_map_t map;
	vm_object_t object;
	vm_map_entry_t entry;
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		/* Walk every entry of the map, bounded by nentries. */
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if( _vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
		/* Share maps and submaps are themselves maps; recurse. */
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if( _vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (obj = entry->object.vm_object) {	/* assignment intended */
		/* Follow the shadow chain to the bottom-most object. */
		for(; obj; obj=obj->backing_object)
			if( obj == object) {
				return 1;
			}
	}
	return 0;
}

/*
 * Return 1 if the object is referenced from any process vmspace or
 * from one of the well-known kernel maps, 0 otherwise.  Debug-only
 * (DDB) -- walks allproc without locking.
 */
static int
vm_object_in_map( object)
	vm_object_t object;
{
	struct proc *p;
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if( !p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if( _vm_object_in_map(&p->p_vmspace->vm_map, object, 0))
			return 1;
	}
	if( _vm_object_in_map( kernel_map, object, 0))
		return 1;
	if( _vm_object_in_map( kmem_map, object, 0))
		return 1;
	if( _vm_object_in_map( pager_map, object, 0))
		return 1;
	if( _vm_object_in_map( buffer_map, object, 0))
		return 1;
	if( _vm_object_in_map( io_map, object, 0))
		return 1;
	if( _vm_object_in_map( phys_map, object, 0))
		return 1;
	if( _vm_object_in_map( mb_map, object, 0))
		return 1;
	if( _vm_object_in_map( u_map, object, 0))
		return 1;
	return 0;
}

/*
 * DDB "show vmochk": sanity-check every object on vm_object_list.
 * Internal (handle-less default/swap) objects must be reachable from
 * some map and must not have a zero reference count.
 */
DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %d\n",
					object->size);
			}
			if (!vm_object_in_map(object)) {
				/*
				 * NOTE(review): object->size is deliberately
				 * passed twice (printed in decimal and hex);
				 * the %d/%x specifiers applied to size and
				 * pointers assume int-sized values -- fine on
				 * i386 of this era, not portable.
				 */
				db_printf("vmochk: internal obj is not in a map: "
				    "ref: %d, size: %d: 0x%x, backing_object: 0x%x\n",
				    object->ref_count, object->size,
				    object->size, object->backing_object);
			}
		}
	}
}

/*
 *	vm_object_print:	[ debug ]
 *
 *	DDB "show object <addr>": dump one vm_object's vital statistics.
 *	When an address argument was supplied (have_addr), also list the
 *	resident pages, six per output line.
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	register vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define count was_count

	register int count;

	if (object == NULL)
		return;

	db_iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
	    (int) object, (int) object->size,
	    object->resident_page_count, object->ref_count);
	db_printf("offset=0x%x, backing_object=(0x%x)+0x%x\n",
	    (int) object->paging_offset,
	    (int) object->backing_object, (int) object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			/* Start a new output line after every six pages. */
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print.
 */
/*
 * Non-static wrapper so vm_map_print() can call the static DDB command
 * implementation vm_object_print_static() above.  All four arguments
 * are passed through unchanged.
 */
void
vm_object_print(addr, have_addr, count, modif)
	db_expr_t addr;
	boolean_t have_addr;
	db_expr_t count;
	char *modif;
{
	vm_object_print_static(addr, have_addr, count, modif);
}

/*
 * DDB "show vmopag": for every object on vm_object_list, print runs of
 * resident pages as index(start)run(length)pa(first physical address).
 * Only the first 128 page indices of each object are examined.  Output
 * pauses for a keypress every 18 lines; any key other than space
 * aborts the whole command.
 */
DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;		/* lines printed since last pause */
	int c;
	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_offset_t pa = -1, padiff;
		int rcount;	/* length of the current physical run */
		vm_page_t m;

		db_printf("new object: 0x%x\n", object);
		/* Pause for a keypress; anything but space aborts. */
		if ( nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for(idx=0;idx<osize;idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				/* Hole in the object: flush any open run. */
				if (rcount) {
					db_printf(" index(%d)run(%d)pa(0x%x)\n",
						fidx, rcount, pa);
					if ( nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}


			/* Physically contiguous with the run: extend it. */
			if (rcount &&
				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				/*
				 * Not contiguous.  If the page-number gap is a
				 * multiple of the PQ_L2 (page coloring) size --
				 * i.e. (diff >> PAGE_SHIFT) & PQ_L2_MASK == 0 --
				 * rebase and keep the run going; presumably this
				 * merges same-color allocations (confirm against
				 * the page-queue coloring code).
				 */
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				/* Otherwise report the run and its gap. */
				db_printf(" index(%d)run(%d)pa(0x%x)", fidx, rcount, pa);
				db_printf("pd(%d)\n", padiff);
				if ( nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			/* Start a new run at this page. */
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		/* Flush the final run for this object. */
		if (rcount) {
			db_printf(" index(%d)run(%d)pa(0x%x)\n", fidx, rcount, pa);
			if ( nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */