vm_object.c revision 76117
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_object.c 76117 2001-04-29 02:45:39Z grog $
 */

/*
 *	Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void	vm_object_qcollapse __P((vm_object_t object));

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
static struct mtx vm_object_list_mtx;	/* lock for object list and count */
static long vm_object_count;		/* count of all objects */
vm_object_t kernel_object;
vm_object_t kmem_object;
static struct vm_object kernel_object_store;
static struct vm_object kmem_object_store;
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;
static vm_zone_t obj_zone;
static struct vm_zone obj_zone_store;
static int object_hash_rand;
#define VM_OBJECTS_INIT 256
static struct vm_object vm_objects_init[VM_OBJECTS_INIT];

void
_vm_object_allocate(type, size, object)
	objtype_t type;
	vm_size_t size;
	vm_object_t object;
{
	int incr;

	TAILQ_INIT(&object->memq);
	TAILQ_INIT(&object->shadow_head);

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->flags = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		vm_object_set_flag(object, OBJ_ONEMAPPING);
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->pg_color = next_index;
	if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	next_index = (next_index + incr) & PQ_L2_MASK;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
	/*
	 * Try to generate a number that will spread objects out in the
	 * hash table.  We 'wipe' new objects across the hash in 128 page
	 * increments plus 1 more to offset it a little more by the time
	 * it wraps around.
	 */
	object->hash_rand = object_hash_rand - 129;

	object->generation++;

	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	object_hash_rand = object->hash_rand;
}
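
/*
 * Illustrative sketch, not part of this revision: the page-color
 * spreading arithmetic from _vm_object_allocate() above, extracted into
 * a stand-alone program.  The DEMO_PQ_* constants are made-up stand-ins;
 * the real PQ_* values depend on the configured L2 cache geometry.
 */
#if 0
#include <stdio.h>

#define	DEMO_PQ_L2_SIZE	256			/* assumed value */
#define	DEMO_PQ_L2_MASK	(DEMO_PQ_L2_SIZE - 1)
#define	DEMO_PQ_PRIME1	31			/* assumed value */

int
main(void)
{
	static const int sizes[] = { 1, 16, 4096 };	/* sizes in pages */
	int next_index = 0;
	int i, incr, pg_color;

	for (i = 0; i < 3; i++) {
		pg_color = next_index;
		/* Huge objects advance the color cursor by a capped stride. */
		if (sizes[i] > DEMO_PQ_L2_SIZE / 3 + DEMO_PQ_PRIME1)
			incr = DEMO_PQ_L2_SIZE / 3 + DEMO_PQ_PRIME1;
		else
			incr = sizes[i];
		next_index = (next_index + incr) & DEMO_PQ_L2_MASK;
		printf("size %4d pages -> pg_color %3d\n", sizes[i], pg_color);
	}
	return (0);
}
#endif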

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init()
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", MTX_DEF);
	vm_object_count = 0;

	kernel_object = &kernel_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);

	obj_zone = &obj_zone_store;
	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
	    vm_objects_init, VM_OBJECTS_INIT);
}

void
vm_object_init2()
{
	zinitna(obj_zone, NULL, NULL, 0, 0, 0, 1);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(type, size)
	objtype_t type;
	vm_size_t size;
{
	vm_object_t result;

	result = (vm_object_t) zalloc(obj_zone);

	_vm_object_allocate(type, size, result);

	return (result);
}

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void
vm_object_reference(object)
	vm_object_t object;
{
	if (object == NULL)
		return;

	KASSERT(!(object->flags & OBJ_DEAD),
	    ("vm_object_reference: attempting to reference dead obj"));

	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		while (vget((struct vnode *) object->handle, LK_RETRY|LK_NOOBJ, curproc)) {
			printf("vm_object_reference: delay in getting object\n");
		}
	}
}

void
vm_object_vndeallocate(object)
	vm_object_t object;
{
	struct vnode *vp = (struct vnode *) object->handle;

	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		vp->v_flag &= ~VTEXT;
		vm_object_clear_flag(object, OBJ_OPT);
	}
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	vm_object_t object;
{
	vm_object_t temp;

	while (object != NULL) {

		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			return;
		}

		KASSERT(object->ref_count != 0,
		    ("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			return;
		} else if (object->ref_count == 1) {
			if (object->shadow_count == 0) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = TAILQ_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				     object->ref_count,
				     object->shadow_count));
				if ((robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;

					while (
					    robject->paging_in_progress ||
					    object->paging_in_progress
					) {
						vm_object_pip_sleep(robject, "objde1");
						vm_object_pip_sleep(object, "objde2");
					}

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}

					object = robject;
					vm_object_collapse(object);
					continue;
				}
			}

			return;

		}

doterm:

		temp = object->backing_object;
		if (temp) {
			TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
			temp->shadow_count--;
			if (temp->ref_count == 0)
				vm_object_clear_flag(temp, OBJ_OPT);
			temp->generation++;
			object->backing_object = NULL;
		}
		vm_object_terminate(object);
		/* unlocks and deallocates object */
		object = temp;
	}
}
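
/*
 * Illustrative sketch, not part of this revision: the intended pairing
 * of the reference-count primitives above.  The helper is hypothetical
 * and exists only to show the object lifecycle.
 */
#if 0
static void
vm_object_refcount_example(void)
{
	vm_object_t obj;

	obj = vm_object_allocate(OBJT_DEFAULT, 16);	/* ref_count == 1 */
	vm_object_reference(obj);			/* ref_count == 2 */
	vm_object_deallocate(obj);			/* ref_count == 1 */
	vm_object_deallocate(obj);	/* last reference: terminates */
}
#endif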

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(object)
	vm_object_t object;
{
	vm_page_t p;
	int s;

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp;

		/*
		 * Freeze optimized copies.
		 */
		vm_freeze_copyopts(object, 0, object->size);

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);

		vp = (struct vnode *) object->handle;
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	}

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	s = splvm();
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		KASSERT(!p->busy && (p->flags & PG_BUSY) == 0,
		    ("vm_object_terminate: freeing busy page %p "
		    "p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
		if (p->wire_count == 0) {
			vm_page_busy(p);
			vm_page_free(p);
			cnt.v_pfree++;
		} else {
			vm_page_busy(p);
			vm_page_remove(p);
		}
	}
	splx(s);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	/*
	 * Remove the object from the global object list.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);

	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	zfree(obj_zone, object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.  If NOSYNC is set then do not
 *	write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(object, start, end, flags)
	vm_object_t object;
	vm_pindex_t start;
	vm_pindex_t end;
	int flags;
{
	vm_page_t p, np, tp;
	vm_pindex_t tstart, tend;
	vm_pindex_t pi;
	int s;
	struct vnode *vp;
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	int clearobjflags;
	int pagerflags;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];
	int curgeneration;

	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : 0;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vp = object->handle;

	vm_object_set_flag(object, OBJ_CLEANING);

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}

	/*
	 * Generally set CLEANCHK interlock and make the page read-only so
	 * we can then clear the object flags.
	 *
	 * However, if this is a nosync mmap then the object is likely to
	 * stay dirty so do not mess with the page and do not clear the
	 * object flags.
	 */
	clearobjflags = 1;

	TAILQ_FOREACH(p, &object->memq, listq) {
		vm_page_flag_set(p, PG_CLEANCHK);
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
			clearobjflags = 0;
		else
			vm_page_protect(p, VM_PROT_READ);
	}

	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
		vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	}

rescan:
	curgeneration = object->generation;

	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		np = TAILQ_NEXT(p, listq);

		pi = p->pindex;
		if (((p->flags & PG_CLEANCHK) == 0) ||
		    (pi < tstart) || (pi >= tend) ||
		    (p->valid == 0) ||
		    ((p->queue - p->pc) == PQ_CACHE)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		/*
		 * If we have been asked to skip nosync pages and this is a
		 * nosync page, skip it.  Note that the object flags were
		 * not cleared in this case so we do not have to set them.
		 */
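		/*
		 * (An illustrative sketch of how the maf[]/mab[] runs
		 * collected below are stitched together appears after
		 * this function.)
		 */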
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		s = splvm();
		while (vm_page_sleep_busy(p, TRUE, "vpcwai")) {
			if (object->generation != curgeneration) {
				splx(s);
				goto rescan;
			}
		}

		maxf = 0;
		for (i = 1; i < vm_pageout_page_count; i++) {
			if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
				if ((tp->flags & PG_BUSY) ||
				    (tp->flags & PG_CLEANCHK) == 0 ||
				    (tp->busy != 0))
					break;
				if ((tp->queue - tp->pc) == PQ_CACHE) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				maf[i - 1] = tp;
				maxf++;
				continue;
			}
			break;
		}

		maxb = 0;
		chkb = vm_pageout_page_count - maxf;
		if (chkb) {
			for (i = 1; i < chkb; i++) {
				if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
					if ((tp->flags & PG_BUSY) ||
					    (tp->flags & PG_CLEANCHK) == 0 ||
					    (tp->busy != 0))
						break;
					if ((tp->queue - tp->pc) == PQ_CACHE) {
						vm_page_flag_clear(tp, PG_CLEANCHK);
						break;
					}
					vm_page_test_dirty(tp);
					if ((tp->dirty & tp->valid) == 0) {
						vm_page_flag_clear(tp, PG_CLEANCHK);
						break;
					}
					mab[i - 1] = tp;
					maxb++;
					continue;
				}
				break;
			}
		}

		for (i = 0; i < maxb; i++) {
			int index = (maxb - i) - 1;

			ma[index] = mab[i];
			vm_page_flag_clear(ma[index], PG_CLEANCHK);
		}
		vm_page_flag_clear(p, PG_CLEANCHK);
		ma[maxb] = p;
		for (i = 0; i < maxf; i++) {
			int index = (maxb + i) + 1;

			ma[index] = maf[i];
			vm_page_flag_clear(ma[index], PG_CLEANCHK);
		}
		runlen = maxb + maxf + 1;

		splx(s);
		vm_pageout_flush(ma, runlen, pagerflags);
		for (i = 0; i < runlen; i++) {
			if (ma[i]->valid & ma[i]->dirty) {
				vm_page_protect(ma[i], VM_PROT_READ);
				vm_page_flag_set(ma[i], PG_CLEANCHK);
			}
		}
		if (object->generation != curgeneration)
			goto rescan;
	}

#if 0
	VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0, curproc);
#endif

	vm_object_clear_flag(object, OBJ_CLEANING);
	return;
}

/*
 * Illustrative sketch, not part of this revision: how the backward
 * (mab[]) and forward (maf[]) runs collected by vm_object_page_clean()
 * are stitched into ma[] before vm_pageout_flush().  The backward run is
 * reversed so ma[] ends up in ascending page-index order with the anchor
 * page at ma[maxb].  Plain page indices stand in for vm_page_t here.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int mab[] = { 9, 8, 7 };	/* found at pi-1, pi-2, pi-3 */
	int maf[] = { 11, 12 };		/* found at pi+1, pi+2 */
	int ma[6];
	int maxb = 3, maxf = 2, pi = 10;
	int i;

	for (i = 0; i < maxb; i++)
		ma[(maxb - i) - 1] = mab[i];	/* reverse backward run */
	ma[maxb] = pi;				/* the anchor page */
	for (i = 0; i < maxf; i++)
		ma[(maxb + i) + 1] = maf[i];
	for (i = 0; i < maxb + maxf + 1; i++)
		printf("%d ", ma[i]);		/* prints: 7 8 9 10 11 12 */
	printf("\n");
	return (0);
}
#endif

#ifdef not_used
/* XXX I cannot tell if this should be an exported symbol */
/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
static void
vm_object_deactivate_pages(object)
	vm_object_t object;
{
	vm_page_t p, next;

	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
		next = TAILQ_NEXT(p, listq);
		vm_page_deactivate(p);
	}
}
#endif

/*
 * Same as vm_object_pmap_copy, except range checking really
 * works, and is meant for small sections of an object.
 *
 * This code protects resident pages by making them read-only
 * and is typically called on a fork or split when a page
 * is converted to copy-on-write.
 *
 * NOTE: If the page is already at VM_PROT_NONE, calling
 * vm_page_protect will have no effect.
 */
void
vm_object_pmap_copy_1(object, start, end)
	vm_object_t object;
	vm_pindex_t start;
	vm_pindex_t end;
{
	vm_pindex_t idx;
	vm_page_t p;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;

	for (idx = start; idx < end; idx++) {
		p = vm_page_lookup(object, idx);
		if (p == NULL)
			continue;
		vm_page_protect(p, VM_PROT_READ);
	}
}

/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_remove(object, start, end)
	vm_object_t object;
	vm_pindex_t start;
	vm_pindex_t end;
{
	vm_page_t p;

	if (object == NULL)
		return;
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (p->pindex >= start && p->pindex < end)
			vm_page_protect(p, VM_PROT_NONE);
	}
	if ((start == 0) && (object->size == end))
		vm_object_clear_flag(object, OBJ_WRITEABLE);
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
void
vm_object_madvise(object, pindex, count, advise)
	vm_object_t object;
	vm_pindex_t pindex;
	int count;
	int advise;
{
	vm_pindex_t end, tpindex;
	vm_object_t tobject;
	vm_page_t m;

	if (object == NULL)
		return;

	end = pindex + count;

	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				continue;
			}
		}

		m = vm_page_lookup(tobject, tpindex);

		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);

			/*
			 * Descend to the next object, translating the page
			 * index by the current object's offset into its
			 * backer before the descent.
			 */
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			tobject = tobject->backing_object;
			if (tobject == NULL)
				continue;
			goto shadowlookup;
		}

		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  If the page is not managed there are no
		 * page queues to mess with.  Things can break if we mess
		 * with pages in any of the below states.
		 */
		if (
		    m->hold_count ||
		    m->wire_count ||
		    (m->flags & PG_UNMANAGED) ||
		    m->valid != VM_PAGE_BITS_ALL
		) {
			continue;
		}

		if (vm_page_sleep_busy(m, TRUE, "madvpo"))
			goto relookup;

		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.
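			 * (A sketch of the shadow-chain index translation
			 * used above appears after this function.)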
			 * This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
			if (tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
		}
	}
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void
vm_object_shadow(object, offset, length)
	vm_object_t *object;	/* IN/OUT */
	vm_ooffset_t *offset;	/* IN/OUT */
	vm_size_t length;
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	if (source != NULL &&
	    source->ref_count == 1 &&
	    source->handle == NULL &&
	    (source->type == OBJT_DEFAULT ||
	     source->type == OBJT_SWAP))
		return;

	/*
	 * Allocate a new object with the given length
	 */
	result = vm_object_allocate(OBJT_DEFAULT, length);
	KASSERT(result != NULL, ("vm_object_shadow: no object for shadowing"));

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	if (source) {
		TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & PQ_L2_MASK;
	}

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;

	/*
	 * Return the new things
	 */
	*offset = 0;
	*object = result;
}
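
/*
 * Illustrative sketch, not part of this revision: the shadow-chain
 * page-index translation used by vm_object_madvise() above.  The current
 * object's backing_object_offset is added before each descent, since a
 * page at index i in an object lives at i + OFF_TO_IDX(offset) in its
 * backer.  The struct and values below are made up for illustration.
 */
#if 0
#include <stdio.h>

#define	DEMO_PAGE_SHIFT	12			/* assumed 4K pages */
#define	DEMO_OFF_TO_IDX(off)	((off) >> DEMO_PAGE_SHIFT)

struct demo_obj {
	struct demo_obj *backing_object;
	long backing_object_offset;		/* bytes into the backer */
};

int
main(void)
{
	struct demo_obj c = { NULL, 0 };
	struct demo_obj b = { &c, 4L << DEMO_PAGE_SHIFT };	/* 4 pages in */
	struct demo_obj a = { &b, 2L << DEMO_PAGE_SHIFT };	/* 2 pages in */
	struct demo_obj *o = &a;
	long pindex = 10;			/* page 10 of object a */

	while (o->backing_object != NULL) {
		pindex += DEMO_OFF_TO_IDX(o->backing_object_offset);
		o = o->backing_object;
	}
	printf("translated pindex: %ld\n", pindex);	/* 10 + 2 + 4 = 16 */
	return (0);
}
#endif

/*
 * Illustrative sketch, not part of this revision: the pg_color fix-up in
 * vm_object_shadow() above.  Index i in the shadow corresponds to index
 * i + OFF_TO_IDX(offset) in the source, so advancing the shadow's base
 * color by the same amount keeps page coloring consistent across the
 * boundary.  Constants are illustrative stand-ins.
 */
#if 0
#include <stdio.h>

#define	DEMO_PQ_L2_MASK	255			/* assumed */
#define	DEMO_OFF_TO_IDX(off)	((off) >> 12)	/* assumed 4K pages */

int
main(void)
{
	int source_color = 40;
	long offset = 8L << 12;			/* shadow maps 8 pages in */
	int shadow_color;

	shadow_color = (source_color + DEMO_OFF_TO_IDX(offset)) &
	    DEMO_PQ_L2_MASK;
	printf("shadow pg_color = %d\n", shadow_color);	/* 48 */
	return (0);
}
#endif

#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static __inline int
vm_object_backing_scan(vm_object_t object, int op)
{
	int s;
	int r = 1;
	vm_page_t p;
	vm_object_t backing_object;
	vm_pindex_t backing_offset_index;

	s = splvm();

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of
		 * swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed!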
		 * XXX
		 */
		if (backing_object->type != OBJT_DEFAULT) {
			splx(s);
			return (0);
		}
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		vm_object_set_flag(backing_object, OBJ_DEAD);
	}

	/*
	 * Our scan
	 */
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next = TAILQ_NEXT(p, listq);
		vm_pindex_t new_pindex = p->pindex - backing_offset_index;

		if (op & OBSC_TEST_ALL_SHADOWED) {
			vm_page_t pp;

			/*
			 * Ignore pages outside the parent object's range
			 * and outside the parent object's mapping of the
			 * backing object.
			 *
			 * note that we do not busy the backing object's
			 * page.
			 */
			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				p = next;
				continue;
			}

			/*
			 * See if the parent has the page or if the parent's
			 * object pager has the page.  If the parent has the
			 * page but the page is not valid, the parent's
			 * object pager must have the page.
			 *
			 * If this fails, the parent does not completely shadow
			 * the object and we might as well give up now.
			 */
			pp = vm_page_lookup(object, new_pindex);
			if (
			    (pp == NULL || pp->valid == 0) &&
			    !vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				r = 0;
				break;
			}
		}

		/*
		 * Check for busy page
		 */
		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
			vm_page_t pp;

			if (op & OBSC_COLLAPSE_NOWAIT) {
				if (
				    (p->flags & PG_BUSY) ||
				    !p->valid ||
				    p->hold_count ||
				    p->wire_count ||
				    p->busy
				) {
					p = next;
					continue;
				}
			} else if (op & OBSC_COLLAPSE_WAIT) {
				if (vm_page_sleep_busy(p, TRUE, "vmocol")) {
					/*
					 * If we slept, anything could have
					 * happened.  Since the object is
					 * marked dead, the backing offset
					 * should not have changed so we
					 * just restart our scan.
					 */
					p = TAILQ_FIRST(&backing_object->memq);
					continue;
				}
			}

			/*
			 * Busy the page
			 */
			vm_page_busy(p);

			KASSERT(
			    p->object == backing_object,
			    ("vm_object_backing_scan: object mismatch")
			);

			/*
			 * Destroy any associated swap
			 */
			if (backing_object->type == OBJT_SWAP) {
				swap_pager_freespace(
				    backing_object,
				    p->pindex,
				    1
				);
			}

			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				/*
				 * Page is out of the parent object's range, we
				 * can simply destroy it.
				 */
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
				p = next;
				continue;
			}

			pp = vm_page_lookup(object, new_pindex);
			if (
			    pp != NULL ||
			    vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				/*
				 * page already exists in parent OR swap exists
				 * for this location in the parent.  Destroy
				 * the original page from the backing object.
				 *
				 * Leave the parent's page alone
				 */
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
				p = next;
				continue;
			}

			/*
			 * Page does not exist in parent, rename the
			 * page from the backing object to the main object.
			 *
			 * If the page was mapped to a process, it can remain
			 * mapped through the rename.
			 */
			if ((p->queue - p->pc) == PQ_CACHE)
				vm_page_deactivate(p);

			vm_page_rename(p, object, new_pindex);
			/* page automatically made dirty by rename */
		}
		p = next;
	}
	splx(s);
	return (r);
}

/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(object)
	vm_object_t object;
{
	vm_object_t backing_object = object->backing_object;

	if (backing_object->ref_count != 1)
		return;

	backing_object->ref_count += 2;

	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);

	backing_object->ref_count -= 2;
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(object)
	vm_object_t object;
{
	while (TRUE) {
		vm_object_t backing_object;

		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if (object == NULL)
			break;

		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsable.
		 */
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			break;
		}

		if (
		    object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0
		) {
			vm_object_qcollapse(object);
			break;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				vm_object_pip_add(backing_object, 1);

				/*
				 * scrap the paging_offset junk and do a
				 * discrete copy.  This also removes major
				 * assumptions about how the swap-pager
				 * works from where it doesn't belong.  The
				 * new swapper is able to optimize the
				 * destroy-source case.
				 */
				vm_object_pip_add(object, 1);
				swap_pager_copy(
				    backing_object,
				    object,
				    OFF_TO_IDX(object->backing_object_offset), TRUE);
				vm_object_pip_wakeup(object);

				vm_object_pip_wakeup(backing_object);
			}
			/*
			 * Object now shadows whatever backing_object did.
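			 * (A skeleton of the collapse-vs-bypass decision
			 * made by this function appears after it.)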
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			TAILQ_REMOVE(
			    &object->backing_object->shadow_head,
			    object,
			    shadow_list
			);
			object->backing_object->shadow_count--;
			object->backing_object->generation++;
			if (backing_object->backing_object) {
				TAILQ_REMOVE(
				    &backing_object->backing_object->shadow_head,
				    backing_object,
				    shadow_list
				);
				backing_object->backing_object->shadow_count--;
				backing_object->backing_object->generation++;
			}
			object->backing_object = backing_object->backing_object;
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(
				    &object->backing_object->shadow_head,
				    object,
				    shadow_list
				);
				object->backing_object->shadow_count++;
				object->backing_object->generation++;
			}

			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			TAILQ_REMOVE(
			    &vm_object_list,
			    backing_object,
			    object_list
			);
			vm_object_count--;

			zfree(obj_zone, backing_object);

			object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			TAILQ_REMOVE(
			    &backing_object->shadow_head,
			    object,
			    shadow_list
			);
			backing_object->shadow_count--;
			backing_object->generation++;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				vm_object_reference(new_backing_object);
				TAILQ_INSERT_TAIL(
				    &new_backing_object->shadow_head,
				    object,
				    shadow_list
				);
				new_backing_object->shadow_count++;
				new_backing_object->generation++;
				object->backing_object_offset +=
				    backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object.  Since
			 * its ref_count was at least 2, it will not vanish;
			 * so we don't need to call vm_object_deallocate, but
			 * we do anyway.
			 */
			vm_object_deallocate(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
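/*
 * Illustrative sketch, not part of this revision: the decision structure
 * of vm_object_collapse() above, reduced to a skeleton.  All names below
 * are stand-ins for illustration.
 */
#if 0
enum collapse_action { GIVE_UP, DEFER, COLLAPSE, BYPASS };

static enum collapse_action
collapse_decision(int collapsable, int paging, int backing_refs,
    int fully_shadowed)
{
	if (!collapsable)		/* wrong type, dead, or has handle */
		return (GIVE_UP);
	if (paging)			/* only qcollapse; retry later */
		return (DEFER);
	if (backing_refs == 1)		/* steal pages and pager, free it */
		return (COLLAPSE);
	if (fully_shadowed)		/* unlink backer, keep it alive */
		return (BYPASS);
	return (GIVE_UP);
}
#endif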
void
vm_object_page_remove(object, start, end, clean_only)
	vm_object_t object;
	vm_pindex_t start;
	vm_pindex_t end;
	boolean_t clean_only;
{
	vm_page_t p, next;
	unsigned int size;
	int all;

	if (object == NULL ||
	    object->resident_page_count == 0)
		return;

	all = ((end == 0) && (start == 0));

	/*
	 * Since physically-backed objects do not use managed pages, we can't
	 * remove pages from the object (we must instead remove the page
	 * references, and then destroy the object).
	 */
	KASSERT(object->type != OBJT_PHYS, ("attempt to remove pages from a physical object"));

	vm_object_pip_add(object, 1);
again:
	size = end - start;
	if (all || size > object->resident_page_count / 4) {
		for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
			next = TAILQ_NEXT(p, listq);
			if (all || ((start <= p->pindex) && (p->pindex < end))) {
				if (p->wire_count != 0) {
					vm_page_protect(p, VM_PROT_NONE);
					if (!clean_only)
						p->valid = 0;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if (vm_page_sleep_busy(p, TRUE, "vmopar"))
					goto again;

				if (clean_only && p->valid) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty)
						continue;
				}

				vm_page_busy(p);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			}
		}
	} else {
		while (size > 0) {
			if ((p = vm_page_lookup(object, start)) != 0) {

				if (p->wire_count != 0) {
					vm_page_protect(p, VM_PROT_NONE);
					if (!clean_only)
						p->valid = 0;
					start += 1;
					size -= 1;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if (vm_page_sleep_busy(p, TRUE, "vmopar"))
					goto again;

				if (clean_only && p->valid) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty) {
						start += 1;
						size -= 1;
						continue;
					}
				}

				vm_page_busy(p);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			}
			start += 1;
			size -= 1;
		}
	}
	vm_object_pip_wakeup(object);
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
	vm_object_t prev_object;
	vm_pindex_t prev_pindex;
	vm_size_t prev_size, next_size;
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL) {
		return (TRUE);
	}

	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 * . more than one reference
	 * . paged out
	 * . shadows another object
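	 * (A sketch of the index arithmetic used below appears after
	 * this function.)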
	 * . has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry
	 * may be in use anyway)
	 */
	if (prev_object->backing_object != NULL) {
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = prev_pindex + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
		    next_pindex,
		    next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
			    next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	return (TRUE);
}

/*
 * Illustrative sketch, not part of this revision: the index arithmetic
 * in vm_object_coalesce() above.  Sizes arrive in bytes and are
 * converted to page counts before the extension check.  Values are
 * illustrative.
 */
#if 0
#include <stdio.h>

#define	DEMO_PAGE_SHIFT	12			/* assumed 4K pages */

int
main(void)
{
	long prev_pindex = 8;			/* pages */
	long prev_size = 4L << DEMO_PAGE_SHIFT;	/* bytes */
	long next_size = 2L << DEMO_PAGE_SHIFT;	/* bytes */
	long next_pindex, obj_size = 10;	/* object size in pages */

	prev_size >>= DEMO_PAGE_SHIFT;		/* 4 pages */
	next_size >>= DEMO_PAGE_SHIFT;		/* 2 pages */
	next_pindex = prev_pindex + prev_size;	/* 12 */

	/* Grow the object so the coalesced range fits. */
	if (next_pindex + next_size > obj_size)
		obj_size = next_pindex + next_size;	/* 14 */
	printf("object grows to %ld pages\n", obj_size);
	return (0);
}
#endif

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

static int	_vm_object_in_map __P((vm_map_t map, vm_object_t object,
				       vm_map_entry_t entry));
static int	vm_object_in_map __P((vm_object_t object));

static int
_vm_object_in_map(map, object, entry)
	vm_map_t map;
	vm_object_t object;
	vm_map_entry_t entry;
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

static int
vm_object_in_map(object)
	vm_object_t object;
{
	struct proc *p;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
			sx_sunlock(&allproc_lock);
			return 1;
		}
	}
	sx_sunlock(&allproc_lock);
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	if (_vm_object_in_map(kmem_map, object, 0))
		return 1;
	if (_vm_object_in_map(pager_map, object, 0))
		return 1;
	if (_vm_object_in_map(buffer_map, object, 0))
		return 1;
	if (_vm_object_in_map(mb_map, object, 0))
		return 1;
	return 0;
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
				    (long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
				    "vmochk: internal obj is not in a map: "
				    "ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}

/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
	    object, (int)object->type, (u_long)object->size,
	    object->resident_page_count, object->ref_count, object->flags);
	/*
	 * XXX no %qd in kernel.  Truncate object->backing_object_offset.
	 */
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (long)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(addr, have_addr, count, modif)
	/* db_expr_t */ long addr;
	boolean_t have_addr;
	/* db_expr_t */ long count;
	char *modif;
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;
	int c;

	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_offset_t pa = -1, padiff;
		int rcount;
		vm_page_t m;

		db_printf("new object: %p\n", (void *)object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
					    (long)fidx, rcount, (long)pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}

			if (rcount &&
			    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%ld)run(%d)pa(0x%lx)",
				    (long)fidx, rcount, (long)pa);
				db_printf("pd(%ld)\n", (long)padiff);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
			    (long)fidx, rcount, (long)pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */