vm_object.c revision 108012
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_object.c 108012 2002-12-18 04:39:15Z alc $
 */

/*
 *	Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/stdint.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#define EASY_SCAN_FACTOR	8

#define MSYNC_FLUSH_HARDSEQ	0x01
#define MSYNC_FLUSH_SOFTSEQ	0x02

/*
 * msync / VM object flushing optimizations
 */
static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ;
SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags,
	CTLFLAG_RW, &msync_flush_flags, 0, "");
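
/*
 * Illustrative note: the knob above is registered under the "vm" sysctl
 * tree, so it can be inspected or tuned from userland at runtime, e.g.:
 *
 *	sysctl vm.msync_flush_flags	(reads 3, both flags, by default)
 *	sysctl vm.msync_flush_flags=1	(hard-sequential optimization only)
 */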

static void	vm_object_qcollapse(vm_object_t object);
static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */
vm_object_t kernel_object;
vm_object_t kmem_object;
static struct vm_object kernel_object_store;
static struct vm_object kmem_object_store;
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;
static uma_zone_t obj_zone;
#define VM_OBJECTS_INIT 256

static void vm_object_zinit(void *mem, int size);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
}
#endif

static void
vm_object_zinit(void *mem, int size)
{
	vm_object_t object;

	object = (vm_object_t)mem;

	/* These are true for any object that has been freed */
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
}

void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{
	static int object_hash_rand;
	int exp, incr;

	TAILQ_INIT(&object->memq);
	TAILQ_INIT(&object->shadow_head);

	object->root = NULL;
	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->flags = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		vm_object_set_flag(object, OBJ_ONEMAPPING);
	if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	do
		object->pg_color = next_index;
	while (!atomic_cmpset_int(&next_index, object->pg_color,
	    (object->pg_color + incr) & PQ_L2_MASK));
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
	/*
	 * Try to generate a number that will spread objects out in the
	 * hash table.  We 'wipe' new objects across the hash in 128 page
	 * increments plus 1 more to offset it a little more by the time
	 * it wraps around.
	 */
	do {
		exp = object_hash_rand;
		object->hash_rand = exp - 129;
	} while (!atomic_cmpset_int(&object_hash_rand, exp, object->hash_rand));

	atomic_add_int(&object->generation, 1);

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	kernel_object = &kernel_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(obj_zone, VM_OBJECTS_INIT);
}

void
vm_object_init2(void)
{
}

void
vm_object_set_flag(vm_object_t object, u_short bits)
{
	object->flags |= bits;
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{
	GIANT_REQUIRED;
	object->flags &= ~bits;
}

void
vm_object_pip_add(vm_object_t object, short i)
{
	GIANT_REQUIRED;
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{
	GIANT_REQUIRED;
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{
	GIANT_REQUIRED;
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{
	GIANT_REQUIRED;
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_sleep(vm_object_t object, char *waitid)
{
	GIANT_REQUIRED;
	if (object->paging_in_progress) {
		int s = splvm();
		if (object->paging_in_progress) {
			vm_object_set_flag(object, OBJ_PIPWNT);
			tsleep(object, PVM, waitid, 0);
		}
		splx(s);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{
	GIANT_REQUIRED;
	while (object->paging_in_progress)
		vm_object_pip_sleep(object, waitid);
}

/*
 *	vm_object_allocate_wait
 *
 *	Return a new object with the given size, and give the user the
 *	option of waiting for it to complete or failing if the needed
 *	memory isn't available.
 */
vm_object_t
vm_object_allocate_wait(objtype_t type, vm_pindex_t size, int flags)
{
	vm_object_t result;

	result = (vm_object_t) uma_zalloc(obj_zone, flags);

	if (result != NULL)
		_vm_object_allocate(type, size, result);

	return (result);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	return(vm_object_allocate_wait(type, size, M_WAITOK));
}
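
/*
 * Example (illustrative, not from the original source): a typical
 * anonymous-object lifecycle pairs the allocator above with
 * vm_object_deallocate() below.  A new object starts with ref_count == 1,
 * so a single deallocate tears it down ("len" is a hypothetical byte
 * count):
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(len));
 *	... install in a map entry, fault pages in ...
 *	vm_object_deallocate(obj);	(ref 1 -> 0, object terminates)
 */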

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object == NULL)
		return;

	vm_object_lock(object);
#if 0
	/* object can be re-referenced during final cleaning */
	KASSERT(!(object->flags & OBJ_DEAD),
	    ("vm_object_reference: attempting to reference dead obj"));
#endif

	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		while (vget((struct vnode *) object->handle, LK_RETRY, curthread)) {
			printf("vm_object_reference: delay in getting object\n");
		}
	}
	vm_object_unlock(object);
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	GIANT_REQUIRED;
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		mp_fixme("Unlocked vflag access.");
		vp->v_vflag &= ~VV_TEXT;
#ifdef ENABLE_VFS_IOOPT
		vm_object_clear_flag(object, OBJ_OPT);
#endif
	}
	/*
	 * vrele may need a vop lock
	 */
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;

	mtx_lock(&Giant);
	while (object != NULL) {

		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			mtx_unlock(&Giant);
			return;
		}

		KASSERT(object->ref_count != 0,
		    ("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			mtx_unlock(&Giant);
			return;
		} else if (object->ref_count == 1) {
			if (object->shadow_count == 0) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = TAILQ_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				    object->ref_count,
				    object->shadow_count));
				if ((robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;

					while (
					    robject->paging_in_progress ||
					    object->paging_in_progress
					) {
						vm_object_pip_sleep(robject, "objde1");
						vm_object_pip_sleep(object, "objde2");
					}

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}

					object = robject;
					vm_object_collapse(object);
					continue;
				}
			}
			mtx_unlock(&Giant);
			return;
		}
doterm:
		temp = object->backing_object;
		if (temp) {
			TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
			temp->shadow_count--;
#ifdef ENABLE_VFS_IOOPT
			if (temp->ref_count == 0)
				vm_object_clear_flag(temp, OBJ_OPT);
#endif
			temp->generation++;
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		object = temp;
	}
	mtx_unlock(&Giant);
}
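
/*
 * Example (illustrative): reference counting is symmetric; every
 * vm_object_reference() must eventually be matched by a
 * vm_object_deallocate():
 *
 *	vm_object_reference(obj);	ref_count: 1 -> 2
 *	vm_object_deallocate(obj);	ref_count: 2 -> 1
 *	vm_object_deallocate(obj);	ref_count: 1 -> 0, chain torn down
 */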

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
	vm_page_t p;
	int s;

	GIANT_REQUIRED;

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp;

#ifdef ENABLE_VFS_IOOPT
		/*
		 * Freeze optimized copies.
		 */
		vm_freeze_copyopts(object, 0, object->size);
#endif
		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);

		vp = (struct vnode *) object->handle;
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	}

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	s = splvm();
	vm_page_lock_queues();
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		KASSERT(!p->busy && (p->flags & PG_BUSY) == 0,
		    ("vm_object_terminate: freeing busy page %p "
		    "p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
		if (p->wire_count == 0) {
			vm_page_busy(p);
			vm_page_free(p);
			cnt.v_pfree++;
		} else {
			vm_page_busy(p);
			vm_page_remove(p);
		}
	}
	vm_page_unlock_queues();
	splx(s);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	/*
	 * Remove the object from the global object list.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);

	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.   If NOSYNC is set then do not
 *	write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags)
{
	vm_page_t p, np;
	vm_pindex_t tstart, tend;
	vm_pindex_t pi;
	struct vnode *vp;
	int clearobjflags;
	int pagerflags;
	int curgeneration;

	GIANT_REQUIRED;

	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : 0;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vp = object->handle;

	vm_object_set_flag(object, OBJ_CLEANING);

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}

	/*
	 * If the caller is smart and only msync()s a range he knows is
	 * dirty, we may be able to avoid an object scan.  This results in
	 * a phenomenal improvement in performance.  We cannot do this
	 * as a matter of course because the object may be huge - e.g.
	 * the size might be in the gigabytes or terabytes.
	 */
	if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) {
		vm_pindex_t tscan;
		int scanlimit;
		int scanreset;

		scanreset = object->resident_page_count / EASY_SCAN_FACTOR;
		if (scanreset < 16)
			scanreset = 16;

		scanlimit = scanreset;
		tscan = tstart;
		while (tscan < tend) {
			curgeneration = object->generation;
			p = vm_page_lookup(object, tscan);
			if (p == NULL || p->valid == 0 ||
			    (p->queue - p->pc) == PQ_CACHE) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			vm_page_test_dirty(p);
			if ((p->dirty & p->valid) == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			/*
			 * If we have been asked to skip nosync pages and
			 * this is a nosync page, we can't continue.
			 */
			if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			scanlimit = scanreset;

			/*
			 * This returns 0 if it was unable to busy the first
			 * page (i.e. had to sleep).
			 */
			tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags);
		}

		/*
		 * If everything was dirty and we flushed it successfully,
		 * and the requested range is not the entire object, we
		 * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can
		 * return immediately.
		 */
		if (tscan >= tend && (tstart || tend < object->size)) {
			vm_object_clear_flag(object, OBJ_CLEANING);
			return;
		}
	}

	/*
	 * Generally set CLEANCHK interlock and make the page read-only so
	 * we can then clear the object flags.
	 *
	 * However, if this is a nosync mmap then the object is likely to
	 * stay dirty so do not mess with the page and do not clear the
	 * object flags.
	 */
	clearobjflags = 1;
	vm_page_lock_queues();
	TAILQ_FOREACH(p, &object->memq, listq) {
		vm_page_flag_set(p, PG_CLEANCHK);
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
			clearobjflags = 0;
		else
			pmap_page_protect(p, VM_PROT_READ);
	}
	vm_page_unlock_queues();

	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
		struct vnode *vp;

		vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
		if (object->type == OBJT_VNODE &&
		    (vp = (struct vnode *)object->handle) != NULL) {
			VI_LOCK(vp);
			if (vp->v_iflag & VI_OBJDIRTY)
				vp->v_iflag &= ~VI_OBJDIRTY;
			VI_UNLOCK(vp);
		}
	}

rescan:
	curgeneration = object->generation;

	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		int n;

		np = TAILQ_NEXT(p, listq);

again:
		pi = p->pindex;
		if (((p->flags & PG_CLEANCHK) == 0) ||
		    (pi < tstart) || (pi >= tend) ||
		    (p->valid == 0) ||
		    ((p->queue - p->pc) == PQ_CACHE)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		/*
		 * If we have been asked to skip nosync pages and this is a
		 * nosync page, skip it.  Note that the object flags were
		 * not cleared in this case so we do not have to set them.
		 */
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		n = vm_object_page_collect_flush(object, p,
			curgeneration, pagerflags);
		if (n == 0)
			goto rescan;

		if (object->generation != curgeneration)
			goto rescan;

		/*
		 * Try to optimize the next page.  If we can't we pick up
		 * our (random) scan where we left off.
		 */
		if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) {
			if ((p = vm_page_lookup(object, pi + n)) != NULL)
				goto again;
		}
	}

#if 0
	VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc);
#endif

	vm_object_clear_flag(object, OBJ_CLEANING);
	return;
}

static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags)
{
	int runlen;
	int s;
	int maxf;
	int chkb;
	int maxb;
	int i;
	vm_pindex_t pi;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];

	s = splvm();
	vm_page_lock_queues();
	pi = p->pindex;
	while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
		if (object->generation != curgeneration) {
			splx(s);
			return(0);
		}
		vm_page_lock_queues();
	}
	maxf = 0;
	for(i = 1; i < vm_pageout_page_count; i++) {
		vm_page_t tp;

		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
			if ((tp->flags & PG_BUSY) ||
			    (tp->flags & PG_CLEANCHK) == 0 ||
			    (tp->busy != 0))
				break;
			if ((tp->queue - tp->pc) == PQ_CACHE) {
				vm_page_flag_clear(tp, PG_CLEANCHK);
				break;
			}
			vm_page_test_dirty(tp);
			if ((tp->dirty & tp->valid) == 0) {
				vm_page_flag_clear(tp, PG_CLEANCHK);
				break;
			}
			maf[i - 1] = tp;
			maxf++;
			continue;
		}
		break;
	}

	maxb = 0;
	chkb = vm_pageout_page_count - maxf;
	if (chkb) {
		for(i = 1; i < chkb; i++) {
			vm_page_t tp;

			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
				if ((tp->flags & PG_BUSY) ||
				    (tp->flags & PG_CLEANCHK) == 0 ||
				    (tp->busy != 0))
					break;
				if ((tp->queue - tp->pc) == PQ_CACHE) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				mab[i - 1] = tp;
				maxb++;
				continue;
			}
			break;
		}
	}

	for(i = 0; i < maxb; i++) {
		int index = (maxb - i) - 1;
		ma[index] = mab[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	vm_page_flag_clear(p, PG_CLEANCHK);
	ma[maxb] = p;
	for(i = 0; i < maxf; i++) {
		int index = (maxb + i) + 1;
		ma[index] = maf[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	runlen = maxb + maxf + 1;

	splx(s);
	vm_pageout_flush(ma, runlen, pagerflags);
	for (i = 0; i < runlen; i++) {
		if (ma[i]->valid & ma[i]->dirty) {
			pmap_page_protect(ma[i], VM_PROT_READ);
			vm_page_flag_set(ma[i], PG_CLEANCHK);

			/*
			 * maxf will end up being the actual number of pages
			 * we wrote out contiguously, non-inclusive of the
			 * first page.  We do not count look-behind pages.
			 */
			if (i >= maxb + 1 && (maxf > i - maxb - 1))
				maxf = i - maxb - 1;
		}
	}
	vm_page_unlock_queues();
	return(maxf + 1);
}
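
/*
 * Worked example (illustrative) of the run assembly above: if p sits at
 * pindex 10, the look-ahead loop finds dirty pages at 11 and 12, and the
 * look-behind loop finds one at 9, then maxf == 2, maxb == 1 and
 *
 *	ma[] = { page 9, page 10 (== p), page 11, page 12 },  runlen == 4
 *
 * Look-behind pages are reversed into the front of ma[], p lands at index
 * maxb, and look-ahead pages follow, handing vm_pageout_flush() a single
 * pindex-contiguous run.
 */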

#ifdef ENABLE_VFS_IOOPT
/*
 * Same as vm_object_pmap_copy, except range checking really
 * works, and is meant for small sections of an object.
 *
 * This code protects resident pages by making them read-only
 * and is typically called on a fork or split when a page
 * is converted to copy-on-write.
 *
 * NOTE: If the page is already at VM_PROT_NONE, calling
 * pmap_page_protect will have no effect.
 */
void
vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_pindex_t idx;
	vm_page_t p;

	GIANT_REQUIRED;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	for (idx = start; idx < end; idx++) {
		p = vm_page_lookup(object, idx);
		if (p == NULL)
			continue;
		pmap_page_protect(p, VM_PROT_READ);
	}
	vm_page_unlock_queues();
}
#endif

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t tobject;
	vm_page_t m;

	if (object == NULL)
		return;

	vm_object_lock(object);

	end = pindex + count;

	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				continue;
			}
		}

		m = vm_page_lookup(tobject, tpindex);

		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);

			/*
			 * next object
			 */
			tobject = tobject->backing_object;
			if (tobject == NULL)
				continue;
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			goto shadowlookup;
		}

		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  If the page is not managed there are no
		 * page queues to mess with.  Things can break if we mess
		 * with pages in any of the below states.
		 */
		vm_page_lock_queues();
		if (m->hold_count ||
		    m->wire_count ||
		    (m->flags & PG_UNMANAGED) ||
		    m->valid != VM_PAGE_BITS_ALL) {
			vm_page_unlock_queues();
			continue;
		}
		if (vm_page_sleep_if_busy(m, TRUE, "madvpo"))
			goto relookup;
		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
		}
		vm_page_unlock_queues();
		if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
			swap_pager_freespace(tobject, tpindex, 1);
	}
	vm_object_unlock(object);
}
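
/*
 * Example (illustrative): a user-level madvise(2) call such as
 *
 *	madvise(addr, len, MADV_FREE);
 *
 * on an anonymous mapping reaches this function (via vm_map_madvise())
 * roughly as
 *
 *	vm_object_madvise(object, pindex, atop(len), MADV_FREE);
 *
 * after which the resident pages are clean and can be reclaimed by the
 * pagedaemon without any pageout I/O.
 */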

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void
vm_object_shadow(
	vm_object_t *object,	/* IN/OUT */
	vm_ooffset_t *offset,	/* IN/OUT */
	vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	vm_object_lock(source);
	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	if (source != NULL &&
	    source->ref_count == 1 &&
	    source->handle == NULL &&
	    (source->type == OBJT_DEFAULT ||
	     source->type == OBJT_SWAP)) {
		vm_object_unlock(source);
		return;
	}

	/*
	 * Allocate a new object with the given length
	 */
	result = vm_object_allocate(OBJT_DEFAULT, length);
	KASSERT(result != NULL, ("vm_object_shadow: no object for shadowing"));

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	if (source) {
		TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
		if (length < source->size)
			length = source->size;
		if (length > PQ_L2_SIZE / 3 + PQ_PRIME1 ||
		    source->generation > 1)
			length = PQ_L2_SIZE / 3 + PQ_PRIME1;
		result->pg_color = (source->pg_color +
		    length * source->generation) & PQ_L2_MASK;
		next_index = (result->pg_color + PQ_L2_SIZE / 3 + PQ_PRIME1) &
		    PQ_L2_MASK;
	}

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;

	/*
	 * Return the new things
	 */
	*offset = 0;
	*object = result;

	vm_object_unlock(source);
}
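
/*
 * Example (illustrative): the classic caller is the copy-on-write path,
 * which replaces a map entry's object with a fresh shadow, roughly:
 *
 *	vm_object_shadow(&entry->object.vm_object, &entry->offset,
 *	    atop(entry->end - entry->start));
 *
 * Writes then fault pages into the new (initially empty) shadow object,
 * while untouched pages are still found in the backing object beneath it.
 */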

/*
 *	vm_object_split:
 *
 *	Split the pages in a map entry into a new object.  This affords
 *	easier removal of unused pages, and keeps object inheritance from
 *	being a negative impact on memory usage.
 */
void
vm_object_split(vm_map_entry_t entry)
{
	vm_page_t m;
	vm_object_t orig_object, new_object, source;
	vm_offset_t s, e;
	vm_pindex_t offidxstart, offidxend;
	vm_size_t idx, size;
	vm_ooffset_t offset;

	GIANT_REQUIRED;

	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
		return;
	if (orig_object->ref_count <= 1)
		return;

	offset = entry->offset;
	s = entry->start;
	e = entry->end;

	offidxstart = OFF_TO_IDX(offset);
	offidxend = offidxstart + OFF_TO_IDX(e - s);
	size = offidxend - offidxstart;

	new_object = vm_pager_allocate(orig_object->type,
	    NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
	if (new_object == NULL)
		return;

	source = orig_object->backing_object;
	if (source != NULL) {
		vm_object_reference(source);	/* Referenced by new_object */
		TAILQ_INSERT_TAIL(&source->shadow_head,
		    new_object, shadow_list);
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		new_object->backing_object_offset =
		    orig_object->backing_object_offset + offset;
		new_object->backing_object = source;
		source->shadow_count++;
		source->generation++;
	}
	for (idx = 0; idx < size; idx++) {
	retry:
		m = vm_page_lookup(orig_object, offidxstart + idx);
		if (m == NULL)
			continue;

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
		 */
		vm_page_lock_queues();
		if (vm_page_sleep_if_busy(m, TRUE, "spltwt"))
			goto retry;

		vm_page_busy(m);
		vm_page_unlock_queues();
		vm_page_rename(m, new_object, idx);
		/* page automatically made dirty by rename and cache handled */
		vm_page_lock_queues();
		vm_page_busy(m);
		vm_page_unlock_queues();
	}
	if (orig_object->type == OBJT_SWAP) {
		vm_object_pip_add(orig_object, 1);
		/*
		 * copy orig_object pages into new_object
		 * and destroy unneeded pages in
		 * shadow object.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);
		vm_object_pip_wakeup(orig_object);
	}
	TAILQ_FOREACH(m, &new_object->memq, listq)
		vm_page_wakeup(m);
	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
}

#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static __inline int
vm_object_backing_scan(vm_object_t object, int op)
{
	int s;
	int r = 1;
	vm_page_t p;
	vm_object_t backing_object;
	vm_pindex_t backing_offset_index;

	s = splvm();
	GIANT_REQUIRED;

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of
		 * swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed!  XXX
		 */
		if (backing_object->type != OBJT_DEFAULT) {
			splx(s);
			return (0);
		}
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		vm_object_set_flag(backing_object, OBJ_DEAD);
	}

	/*
	 * Our scan
	 */
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next = TAILQ_NEXT(p, listq);
		vm_pindex_t new_pindex = p->pindex - backing_offset_index;

		if (op & OBSC_TEST_ALL_SHADOWED) {
			vm_page_t pp;

			/*
			 * Ignore pages outside the parent object's range
			 * and outside the parent object's mapping of the
			 * backing object.
			 *
			 * note that we do not busy the backing object's
			 * page.
			 */
			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				p = next;
				continue;
			}

			/*
			 * See if the parent has the page or if the parent's
			 * object pager has the page.  If the parent has the
			 * page but the page is not valid, the parent's
			 * object pager must have the page.
			 *
			 * If this fails, the parent does not completely shadow
			 * the object and we might as well give up now.
			 */

			pp = vm_page_lookup(object, new_pindex);
			if (
			    (pp == NULL || pp->valid == 0) &&
			    !vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				r = 0;
				break;
			}
		}

		/*
		 * Check for busy page
		 */
		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
			vm_page_t pp;

			vm_page_lock_queues();
			if (op & OBSC_COLLAPSE_NOWAIT) {
				if ((p->flags & PG_BUSY) ||
				    !p->valid ||
				    p->hold_count ||
				    p->wire_count ||
				    p->busy) {
					vm_page_unlock_queues();
					p = next;
					continue;
				}
			} else if (op & OBSC_COLLAPSE_WAIT) {
				if (vm_page_sleep_if_busy(p, TRUE, "vmocol")) {
					/*
					 * If we slept, anything could have
					 * happened.   Since the object is
					 * marked dead, the backing offset
					 * should not have changed so we
					 * just restart our scan.
					 */
					p = TAILQ_FIRST(&backing_object->memq);
					continue;
				}
			}

			/*
			 * Busy the page
			 */
			vm_page_busy(p);
			vm_page_unlock_queues();

			KASSERT(
			    p->object == backing_object,
			    ("vm_object_qcollapse(): object mismatch")
			);

			/*
			 * Destroy any associated swap
			 */
			if (backing_object->type == OBJT_SWAP) {
				swap_pager_freespace(
				    backing_object,
				    p->pindex,
				    1
				);
			}

			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				/*
				 * Page is out of the parent object's range, we
				 * can simply destroy it.
				 */
				vm_page_lock_queues();
				pmap_remove_all(p);
				vm_page_free(p);
				vm_page_unlock_queues();
				p = next;
				continue;
			}

			pp = vm_page_lookup(object, new_pindex);
			if (
			    pp != NULL ||
			    vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				/*
				 * page already exists in parent OR swap exists
				 * for this location in the parent.  Destroy
				 * the original page from the backing object.
				 *
				 * Leave the parent's page alone
				 */
				vm_page_lock_queues();
				pmap_remove_all(p);
				vm_page_free(p);
				vm_page_unlock_queues();
				p = next;
				continue;
			}

			/*
			 * Page does not exist in parent, rename the
			 * page from the backing object to the main object.
			 *
			 * If the page was mapped to a process, it can remain
			 * mapped through the rename.
			 */
			vm_page_rename(p, object, new_pindex);
			/* page automatically made dirty by rename */
		}
		p = next;
	}
	splx(s);
	return (r);
}


/*
 * This version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	GIANT_REQUIRED;

	if (backing_object->ref_count != 1)
		return;

	backing_object->ref_count += 2;

	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);

	backing_object->ref_count -= 2;
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(vm_object_t object)
{
	GIANT_REQUIRED;

	while (TRUE) {
		vm_object_t backing_object;

		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if (object == NULL)
			break;

		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * We check the backing object first, because it is most
		 * likely not collapsible.
		 */
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			break;
		}

		if (
		    object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0
		) {
			vm_object_qcollapse(object);
			break;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				vm_object_pip_add(backing_object, 1);

				/*
				 * scrap the paging_offset junk and do a
				 * discrete copy.  This also removes major
				 * assumptions about how the swap-pager
				 * works from where it doesn't belong.  The
				 * new swapper is able to optimize the
				 * destroy-source case.
				 */
				vm_object_pip_add(object, 1);
				swap_pager_copy(
				    backing_object,
				    object,
				    OFF_TO_IDX(object->backing_object_offset), TRUE);
				vm_object_pip_wakeup(object);

				vm_object_pip_wakeup(backing_object);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			TAILQ_REMOVE(
			    &object->backing_object->shadow_head,
			    object,
			    shadow_list
			);
			object->backing_object->shadow_count--;
			object->backing_object->generation++;
			if (backing_object->backing_object) {
				TAILQ_REMOVE(
				    &backing_object->backing_object->shadow_head,
				    backing_object,
				    shadow_list
				);
				backing_object->backing_object->shadow_count--;
				backing_object->backing_object->generation++;
			}
			object->backing_object = backing_object->backing_object;
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(
				    &object->backing_object->shadow_head,
				    object,
				    shadow_list
				);
				object->backing_object->shadow_count++;
				object->backing_object->generation++;
			}

			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object));
			KASSERT(TAILQ_FIRST(&backing_object->memq) == NULL, ("backing_object %p somehow has left over pages during collapse!", backing_object));

			mtx_lock(&vm_object_list_mtx);
			TAILQ_REMOVE(
			    &vm_object_list,
			    backing_object,
			    object_list
			);
			mtx_unlock(&vm_object_list_mtx);

			uma_zfree(obj_zone, backing_object);

			object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			TAILQ_REMOVE(
			    &backing_object->shadow_head,
			    object,
			    shadow_list
			);
			backing_object->shadow_count--;
			backing_object->generation++;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				vm_object_reference(new_backing_object);
				TAILQ_INSERT_TAIL(
				    &new_backing_object->shadow_head,
				    object,
				    shadow_list
				);
				new_backing_object->shadow_count++;
				new_backing_object->generation++;
				object->backing_object_offset +=
				    backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object.  Since
			 * its ref_count was at least 2, it will not vanish;
			 * so we don't need to call vm_object_deallocate, but
			 * we do anyway.
			 */
			vm_object_deallocate(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, boolean_t clean_only)
{
	vm_page_t p, next;
	vm_pindex_t size;
	int all;

	if (object == NULL ||
	    object->resident_page_count == 0)
		return;
	all = ((end == 0) && (start == 0));

	/*
	 * Since physically-backed objects do not use managed pages, we can't
	 * remove pages from the object (we must instead remove the page
	 * references, and then destroy the object).
	 */
	KASSERT(object->type != OBJT_PHYS, ("attempt to remove pages from a physical object"));

	vm_object_pip_add(object, 1);
again:
	vm_page_lock_queues();
	size = end - start;
	if (all || size > object->resident_page_count / 4) {
		for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
			next = TAILQ_NEXT(p, listq);
			if (all || ((start <= p->pindex) && (p->pindex < end))) {
				if (p->wire_count != 0) {
					pmap_remove_all(p);
					if (!clean_only)
						p->valid = 0;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
					goto again;

				if (clean_only && p->valid) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty)
						continue;
				}
				vm_page_busy(p);
				pmap_remove_all(p);
				vm_page_free(p);
			}
		}
	} else {
		while (size > 0) {
			if ((p = vm_page_lookup(object, start)) != NULL) {
				if (p->wire_count != 0) {
					pmap_remove_all(p);
					if (!clean_only)
						p->valid = 0;
					start += 1;
					size -= 1;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
					goto again;

				if (clean_only && p->valid) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty) {
						start += 1;
						size -= 1;
						continue;
					}
				}
				vm_page_busy(p);
				pmap_remove_all(p);
				vm_page_free(p);
			}
			start += 1;
			size -= 1;
		}
	}
	vm_page_unlock_queues();
	vm_object_pip_wakeup(object);
}
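
/*
 * Example (illustrative): per the range convention above, an all-zero
 * range means "the whole object":
 *
 *	vm_object_page_remove(object, 0, 0, FALSE);	every resident page
 *	vm_object_page_remove(object, 16, 32, TRUE);	pindex [16, 32),
 *							but dirty pages are
 *							left in place
 */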

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
	vm_size_t prev_size, vm_size_t next_size)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL)
		return (TRUE);
	vm_object_lock(prev_object);
	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		vm_object_unlock(prev_object);
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 * . more than one reference
	 * . paged out
	 * . shadows another object
	 * . has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry
	 * may be in use anyway)
	 */
	if (prev_object->backing_object != NULL) {
		vm_object_unlock(prev_object);
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = prev_pindex + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		vm_object_unlock(prev_object);
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
		    next_pindex,
		    next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
			    next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	vm_object_unlock(prev_object);
	return (TRUE);
}
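
/*
 * Example (illustrative): vm_map_insert() uses this when a new anonymous
 * mapping lands immediately after an existing one.  If
 *
 *	vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
 *
 * returns TRUE, the new range is served by simply growing prev_object to
 * next_pindex + next_size instead of allocating a second object.
 */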

void
vm_object_set_writeable_dirty(vm_object_t object)
{
	struct vnode *vp;

	vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	if (object->type == OBJT_VNODE &&
	    (vp = (struct vnode *)object->handle) != NULL) {
		VI_LOCK(vp);
		if ((vp->v_iflag & VI_OBJDIRTY) == 0)
			vp->v_iflag |= VI_OBJDIRTY;
		VI_UNLOCK(vp);
	}
}

#ifdef ENABLE_VFS_IOOPT
/*
 * Experimental support for zero-copy I/O
 *
 * Performs the copy_on_write operations necessary to allow the virtual copies
 * into user space to work.  This has to be called for write(2) system calls
 * from other processes, file unlinking, and file size shrinkage.
 */
void
vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
{
	int rv;
	vm_object_t robject;
	vm_pindex_t idx;

	GIANT_REQUIRED;
	if ((object == NULL) ||
	    ((object->flags & OBJ_OPT) == 0))
		return;

	if (object->shadow_count > object->ref_count)
		panic("vm_freeze_copyopts: sc > rc");

	while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
		vm_pindex_t bo_pindex;
		vm_page_t m_in, m_out;

		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);

		vm_object_reference(robject);

		vm_object_pip_wait(robject, "objfrz");

		if (robject->ref_count == 1) {
			vm_object_deallocate(robject);
			continue;
		}

		vm_object_pip_add(robject, 1);

		for (idx = 0; idx < robject->size; idx++) {

			m_out = vm_page_grab(robject, idx,
			    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

			if (m_out->valid == 0) {
				m_in = vm_page_grab(object, bo_pindex + idx,
				    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
				vm_page_lock_queues();
				if (m_in->valid == 0) {
					vm_page_unlock_queues();
					rv = vm_pager_get_pages(object, &m_in, 1, 0);
					if (rv != VM_PAGER_OK) {
						printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex);
						continue;
					}
					vm_page_lock_queues();
					vm_page_deactivate(m_in);
				}

				pmap_remove_all(m_in);
				vm_page_unlock_queues();
				pmap_copy_page(m_in, m_out);
				m_out->valid = m_in->valid;
				vm_page_dirty(m_out);
				vm_page_lock_queues();
				vm_page_activate(m_out);
				vm_page_unlock_queues();
				vm_page_wakeup(m_in);
			}
			vm_page_wakeup(m_out);
		}

		object->shadow_count--;
		object->ref_count--;
		TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
		robject->backing_object = NULL;
		robject->backing_object_offset = 0;

		vm_object_pip_wakeup(robject);
		vm_object_deallocate(robject);
	}

	vm_object_clear_flag(object, OBJ_OPT);
}
#endif

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

static int
vm_object_in_map(vm_object_t object)
{
	struct proc *p;

	/* sx_slock(&allproc_lock); */
	LIST_FOREACH(p, &allproc, p_list) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
			/* sx_sunlock(&allproc_lock); */
			return 1;
		}
	}
	/* sx_sunlock(&allproc_lock); */
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	if (_vm_object_in_map(kmem_map, object, 0))
		return 1;
	if (_vm_object_in_map(pager_map, object, 0))
		return 1;
	if (_vm_object_in_map(buffer_map, object, 0))
		return 1;
	return 0;
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
					(long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
			"vmochk: internal obj is not in a map: "
			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}

/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x\n",
	    object, (int)object->type, (uintmax_t)object->size,
	    object->resident_page_count, object->ref_count, object->flags);
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (uintmax_t)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%jx,page=0x%jx)",
		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(
	/* db_expr_t */ long addr,
	boolean_t have_addr,
	/* db_expr_t */ long count,
	char *modif)
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;
	int c;

	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_offset_t pa = -1, padiff;
		int rcount;
		vm_page_t m;

		db_printf("new object: %p\n", (void *)object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
						(long)fidx, rcount, (long)pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}

			if (rcount &&
			    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%ld)run(%d)pa(0x%lx)",
					(long)fidx, rcount, (long)pa);
				db_printf("pd(%ld)\n", (long)padiff);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
				(long)fidx, rcount, (long)pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */
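
/*
 * Example (illustrative): with DDB compiled into the kernel, the commands
 * defined above are invoked from the debugger prompt:
 *
 *	db> show vmochk
 *	db> show object <address of a vm_object>
 *	db> show vmopag
 */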