vm_object.c, revision 106720 (head/sys/vm/vm_object.c, committed 2002-11-10 by alc)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_object.c 106720 2002-11-10 07:12:04Z alc $
 */

/*
 *	Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/stdint.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#define EASY_SCAN_FACTOR	8

#define MSYNC_FLUSH_HARDSEQ	0x01
#define MSYNC_FLUSH_SOFTSEQ	0x02

/*
 * msync / VM object flushing optimizations
 */
static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ;
SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags,
	CTLFLAG_RW, &msync_flush_flags, 0, "");

static void	vm_object_qcollapse(vm_object_t object);
static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
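The "virtual copy" mentioned just above is what gives fork(2) its copy-on-write behaviour: a private page written by the child lands in a freshly created shadow object, so the parent never sees the store. A minimal user-space sketch (illustrative only, not part of vm_object.c):

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char *p;
	pid_t pid;

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	p[0] = 'A';			/* page becomes resident in the object */

	if ((pid = fork()) == -1)
		err(1, "fork");
	if (pid == 0) {
		p[0] = 'B';		/* COW fault: the write is absorbed by a shadow object */
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	printf("parent sees '%c'\n", p[0]);	/* still 'A' */
	return (0);
}

The parent prints 'A' even though the child stored 'B', which is exactly the shadow/virtual-copy arrangement this module maintains.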
132 * 133 * The only items within the object structure which are 134 * modified after time of creation are: 135 * reference count locked by object's lock 136 * pager routine locked by object's lock 137 * 138 */ 139 140struct object_q vm_object_list; 141struct mtx vm_object_list_mtx; /* lock for object list and count */ 142vm_object_t kernel_object; 143vm_object_t kmem_object; 144static struct vm_object kernel_object_store; 145static struct vm_object kmem_object_store; 146extern int vm_pageout_page_count; 147 148static long object_collapses; 149static long object_bypasses; 150static int next_index; 151static uma_zone_t obj_zone; 152#define VM_OBJECTS_INIT 256 153 154static void vm_object_zinit(void *mem, int size); 155 156#ifdef INVARIANTS 157static void vm_object_zdtor(void *mem, int size, void *arg); 158 159static void 160vm_object_zdtor(void *mem, int size, void *arg) 161{ 162 vm_object_t object; 163 164 object = (vm_object_t)mem; 165 KASSERT(object->paging_in_progress == 0, 166 ("object %p paging_in_progress = %d", 167 object, object->paging_in_progress)); 168 KASSERT(object->resident_page_count == 0, 169 ("object %p resident_page_count = %d", 170 object, object->resident_page_count)); 171 KASSERT(object->shadow_count == 0, 172 ("object %p shadow_count = %d", 173 object, object->shadow_count)); 174} 175#endif 176 177static void 178vm_object_zinit(void *mem, int size) 179{ 180 vm_object_t object; 181 182 object = (vm_object_t)mem; 183 184 /* These are true for any object that has been freed */ 185 object->paging_in_progress = 0; 186 object->resident_page_count = 0; 187 object->shadow_count = 0; 188} 189 190void 191_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object) 192{ 193 static int object_hash_rand; 194 int exp, incr; 195 196 TAILQ_INIT(&object->memq); 197 TAILQ_INIT(&object->shadow_head); 198 199 object->root = NULL; 200 object->type = type; 201 object->size = size; 202 object->ref_count = 1; 203 object->flags = 0; 204 if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP)) 205 vm_object_set_flag(object, OBJ_ONEMAPPING); 206 if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1)) 207 incr = PQ_L2_SIZE / 3 + PQ_PRIME1; 208 else 209 incr = size; 210 do 211 object->pg_color = next_index; 212 while (!atomic_cmpset_int(&next_index, object->pg_color, 213 (object->pg_color + incr) & PQ_L2_MASK)); 214 object->handle = NULL; 215 object->backing_object = NULL; 216 object->backing_object_offset = (vm_ooffset_t) 0; 217 /* 218 * Try to generate a number that will spread objects out in the 219 * hash table. We 'wipe' new objects across the hash in 128 page 220 * increments plus 1 more to offset it a little more by the time 221 * it wraps around. 222 */ 223 do { 224 exp = object_hash_rand; 225 object->hash_rand = exp - 129; 226 } while (!atomic_cmpset_int(&object_hash_rand, exp, object->hash_rand)); 227 228 object->generation++; /* atomicity needed? XXX */ 229 230 mtx_lock(&vm_object_list_mtx); 231 TAILQ_INSERT_TAIL(&vm_object_list, object, object_list); 232 mtx_unlock(&vm_object_list_mtx); 233} 234 235/* 236 * vm_object_init: 237 * 238 * Initialize the VM objects module. 
239 */ 240void 241vm_object_init(void) 242{ 243 TAILQ_INIT(&vm_object_list); 244 mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF); 245 246 kernel_object = &kernel_object_store; 247 _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS), 248 kernel_object); 249 250 kmem_object = &kmem_object_store; 251 _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS), 252 kmem_object); 253 obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL, 254#ifdef INVARIANTS 255 vm_object_zdtor, 256#else 257 NULL, 258#endif 259 vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 260 uma_prealloc(obj_zone, VM_OBJECTS_INIT); 261} 262 263void 264vm_object_init2(void) 265{ 266} 267 268void 269vm_object_set_flag(vm_object_t object, u_short bits) 270{ 271 object->flags |= bits; 272} 273 274void 275vm_object_clear_flag(vm_object_t object, u_short bits) 276{ 277 GIANT_REQUIRED; 278 object->flags &= ~bits; 279} 280 281void 282vm_object_pip_add(vm_object_t object, short i) 283{ 284 GIANT_REQUIRED; 285 object->paging_in_progress += i; 286} 287 288void 289vm_object_pip_subtract(vm_object_t object, short i) 290{ 291 GIANT_REQUIRED; 292 object->paging_in_progress -= i; 293} 294 295void 296vm_object_pip_wakeup(vm_object_t object) 297{ 298 GIANT_REQUIRED; 299 object->paging_in_progress--; 300 if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) { 301 vm_object_clear_flag(object, OBJ_PIPWNT); 302 wakeup(object); 303 } 304} 305 306void 307vm_object_pip_wakeupn(vm_object_t object, short i) 308{ 309 GIANT_REQUIRED; 310 if (i) 311 object->paging_in_progress -= i; 312 if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) { 313 vm_object_clear_flag(object, OBJ_PIPWNT); 314 wakeup(object); 315 } 316} 317 318void 319vm_object_pip_sleep(vm_object_t object, char *waitid) 320{ 321 GIANT_REQUIRED; 322 if (object->paging_in_progress) { 323 int s = splvm(); 324 if (object->paging_in_progress) { 325 vm_object_set_flag(object, OBJ_PIPWNT); 326 tsleep(object, PVM, waitid, 0); 327 } 328 splx(s); 329 } 330} 331 332void 333vm_object_pip_wait(vm_object_t object, char *waitid) 334{ 335 GIANT_REQUIRED; 336 while (object->paging_in_progress) 337 vm_object_pip_sleep(object, waitid); 338} 339 340/* 341 * vm_object_allocate_wait 342 * 343 * Return a new object with the given size, and give the user the 344 * option of waiting for it to complete or failing if the needed 345 * memory isn't available. 346 */ 347vm_object_t 348vm_object_allocate_wait(objtype_t type, vm_pindex_t size, int flags) 349{ 350 vm_object_t result; 351 352 result = (vm_object_t) uma_zalloc(obj_zone, flags); 353 354 if (result != NULL) 355 _vm_object_allocate(type, size, result); 356 357 return (result); 358} 359 360/* 361 * vm_object_allocate: 362 * 363 * Returns a new object with the given size. 364 */ 365vm_object_t 366vm_object_allocate(objtype_t type, vm_pindex_t size) 367{ 368 return(vm_object_allocate_wait(type, size, M_WAITOK)); 369} 370 371 372/* 373 * vm_object_reference: 374 * 375 * Gets another reference to the given object. 
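A hedged sketch of how the allocation and reference-counting entry points above (together with vm_object_deallocate() further down) pair up over an object's lifetime; this is a kernel fragment written for illustration, not code from the tree, and "bytes" is a placeholder length:

	vm_object_t obj;

	obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(bytes));	/* ref_count == 1 */
	vm_object_reference(obj);	/* a second user, e.g. another mapping: ref_count == 2 */
	/* ... pages are entered, paged out via the pager, and so on ... */
	vm_object_deallocate(obj);	/* back down to ref_count == 1 */
	vm_object_deallocate(obj);	/* last reference: vm_object_terminate() tears it down */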
376 */ 377void 378vm_object_reference(vm_object_t object) 379{ 380 if (object == NULL) 381 return; 382 383 vm_object_lock(object); 384#if 0 385 /* object can be re-referenced during final cleaning */ 386 KASSERT(!(object->flags & OBJ_DEAD), 387 ("vm_object_reference: attempting to reference dead obj")); 388#endif 389 390 object->ref_count++; 391 if (object->type == OBJT_VNODE) { 392 while (vget((struct vnode *) object->handle, LK_RETRY, curthread)) { 393 printf("vm_object_reference: delay in getting object\n"); 394 } 395 } 396 vm_object_unlock(object); 397} 398 399/* 400 * handle deallocating a object of type OBJT_VNODE 401 */ 402void 403vm_object_vndeallocate(vm_object_t object) 404{ 405 struct vnode *vp = (struct vnode *) object->handle; 406 407 GIANT_REQUIRED; 408 KASSERT(object->type == OBJT_VNODE, 409 ("vm_object_vndeallocate: not a vnode object")); 410 KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp")); 411#ifdef INVARIANTS 412 if (object->ref_count == 0) { 413 vprint("vm_object_vndeallocate", vp); 414 panic("vm_object_vndeallocate: bad object reference count"); 415 } 416#endif 417 418 object->ref_count--; 419 if (object->ref_count == 0) { 420 mp_fixme("Unlocked vflag access."); 421 vp->v_vflag &= ~VV_TEXT; 422#ifdef ENABLE_VFS_IOOPT 423 vm_object_clear_flag(object, OBJ_OPT); 424#endif 425 } 426 /* 427 * vrele may need a vop lock 428 */ 429 vrele(vp); 430} 431 432/* 433 * vm_object_deallocate: 434 * 435 * Release a reference to the specified object, 436 * gained either through a vm_object_allocate 437 * or a vm_object_reference call. When all references 438 * are gone, storage associated with this object 439 * may be relinquished. 440 * 441 * No object may be locked. 442 */ 443void 444vm_object_deallocate(vm_object_t object) 445{ 446 vm_object_t temp; 447 448 mtx_lock(&Giant); 449 while (object != NULL) { 450 451 if (object->type == OBJT_VNODE) { 452 vm_object_vndeallocate(object); 453 mtx_unlock(&Giant); 454 return; 455 } 456 457 KASSERT(object->ref_count != 0, 458 ("vm_object_deallocate: object deallocated too many times: %d", object->type)); 459 460 /* 461 * If the reference count goes to 0 we start calling 462 * vm_object_terminate() on the object chain. 463 * A ref count of 1 may be a special case depending on the 464 * shadow count being 0 or 1. 
465 */ 466 object->ref_count--; 467 if (object->ref_count > 1) { 468 mtx_unlock(&Giant); 469 return; 470 } else if (object->ref_count == 1) { 471 if (object->shadow_count == 0) { 472 vm_object_set_flag(object, OBJ_ONEMAPPING); 473 } else if ((object->shadow_count == 1) && 474 (object->handle == NULL) && 475 (object->type == OBJT_DEFAULT || 476 object->type == OBJT_SWAP)) { 477 vm_object_t robject; 478 479 robject = TAILQ_FIRST(&object->shadow_head); 480 KASSERT(robject != NULL, 481 ("vm_object_deallocate: ref_count: %d, shadow_count: %d", 482 object->ref_count, 483 object->shadow_count)); 484 if ((robject->handle == NULL) && 485 (robject->type == OBJT_DEFAULT || 486 robject->type == OBJT_SWAP)) { 487 488 robject->ref_count++; 489 490 while ( 491 robject->paging_in_progress || 492 object->paging_in_progress 493 ) { 494 vm_object_pip_sleep(robject, "objde1"); 495 vm_object_pip_sleep(object, "objde2"); 496 } 497 498 if (robject->ref_count == 1) { 499 robject->ref_count--; 500 object = robject; 501 goto doterm; 502 } 503 504 object = robject; 505 vm_object_collapse(object); 506 continue; 507 } 508 } 509 mtx_unlock(&Giant); 510 return; 511 } 512doterm: 513 temp = object->backing_object; 514 if (temp) { 515 TAILQ_REMOVE(&temp->shadow_head, object, shadow_list); 516 temp->shadow_count--; 517#ifdef ENABLE_VFS_IOOPT 518 if (temp->ref_count == 0) 519 vm_object_clear_flag(temp, OBJ_OPT); 520#endif 521 temp->generation++; 522 object->backing_object = NULL; 523 } 524 /* 525 * Don't double-terminate, we could be in a termination 526 * recursion due to the terminate having to sync data 527 * to disk. 528 */ 529 if ((object->flags & OBJ_DEAD) == 0) 530 vm_object_terminate(object); 531 object = temp; 532 } 533 mtx_unlock(&Giant); 534} 535 536/* 537 * vm_object_terminate actually destroys the specified object, freeing 538 * up all previously used resources. 539 * 540 * The object must be locked. 541 * This routine may block. 542 */ 543void 544vm_object_terminate(vm_object_t object) 545{ 546 vm_page_t p; 547 int s; 548 549 GIANT_REQUIRED; 550 551 /* 552 * Make sure no one uses us. 553 */ 554 vm_object_set_flag(object, OBJ_DEAD); 555 556 /* 557 * wait for the pageout daemon to be done with the object 558 */ 559 vm_object_pip_wait(object, "objtrm"); 560 561 KASSERT(!object->paging_in_progress, 562 ("vm_object_terminate: pageout in progress")); 563 564 /* 565 * Clean and free the pages, as appropriate. All references to the 566 * object are gone, so we don't need to lock it. 567 */ 568 if (object->type == OBJT_VNODE) { 569 struct vnode *vp; 570 571#ifdef ENABLE_VFS_IOOPT 572 /* 573 * Freeze optimized copies. 574 */ 575 vm_freeze_copyopts(object, 0, object->size); 576#endif 577 /* 578 * Clean pages and flush buffers. 579 */ 580 vm_object_page_clean(object, 0, 0, OBJPC_SYNC); 581 582 vp = (struct vnode *) object->handle; 583 vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0); 584 } 585 586 KASSERT(object->ref_count == 0, 587 ("vm_object_terminate: object with references, ref_count=%d", 588 object->ref_count)); 589 590 /* 591 * Now free any remaining pages. For internal objects, this also 592 * removes them from paging queues. Don't free wired pages, just 593 * remove them from the object. 
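Wired pages (wire_count != 0) usually belong to the kernel, but user space can also wire memory; mlock(2) is the simplest way for a page to acquire a non-zero wire count, which is why the loop below only removes such pages from the object rather than freeing them. A hedged user-space sketch (not part of this file):

#include <sys/mman.h>
#include <err.h>
#include <unistd.h>

int
main(void)
{
	size_t len = 4096;
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	if (mlock(p, len) == -1)	/* faults the page in and wires it */
		err(1, "mlock (may be limited by RLIMIT_MEMLOCK)");
	p[0] = 1;
	if (munlock(p, len) == -1)	/* drop the wiring again */
		err(1, "munlock");
	munmap(p, len);
	return (0);
}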
594 */ 595 s = splvm(); 596 vm_page_lock_queues(); 597 while ((p = TAILQ_FIRST(&object->memq)) != NULL) { 598 KASSERT(!p->busy && (p->flags & PG_BUSY) == 0, 599 ("vm_object_terminate: freeing busy page %p " 600 "p->busy = %d, p->flags %x\n", p, p->busy, p->flags)); 601 if (p->wire_count == 0) { 602 vm_page_busy(p); 603 vm_page_free(p); 604 cnt.v_pfree++; 605 } else { 606 vm_page_busy(p); 607 vm_page_remove(p); 608 } 609 } 610 vm_page_unlock_queues(); 611 splx(s); 612 613 /* 614 * Let the pager know object is dead. 615 */ 616 vm_pager_deallocate(object); 617 618 /* 619 * Remove the object from the global object list. 620 */ 621 mtx_lock(&vm_object_list_mtx); 622 TAILQ_REMOVE(&vm_object_list, object, object_list); 623 mtx_unlock(&vm_object_list_mtx); 624 625 wakeup(object); 626 627 /* 628 * Free the space for the object. 629 */ 630 uma_zfree(obj_zone, object); 631} 632 633/* 634 * vm_object_page_clean 635 * 636 * Clean all dirty pages in the specified range of object. Leaves page 637 * on whatever queue it is currently on. If NOSYNC is set then do not 638 * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC), 639 * leaving the object dirty. 640 * 641 * Odd semantics: if start == end, we clean everything. 642 * 643 * The object must be locked. 644 */ 645void 646vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags) 647{ 648 vm_page_t p, np; 649 vm_pindex_t tstart, tend; 650 vm_pindex_t pi; 651 struct vnode *vp; 652 int clearobjflags; 653 int pagerflags; 654 int curgeneration; 655 656 GIANT_REQUIRED; 657 658 if (object->type != OBJT_VNODE || 659 (object->flags & OBJ_MIGHTBEDIRTY) == 0) 660 return; 661 662 pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : 0; 663 pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0; 664 665 vp = object->handle; 666 667 vm_object_set_flag(object, OBJ_CLEANING); 668 669 tstart = start; 670 if (end == 0) { 671 tend = object->size; 672 } else { 673 tend = end; 674 } 675 676 /* 677 * If the caller is smart and only msync()s a range he knows is 678 * dirty, we may be able to avoid an object scan. This results in 679 * a phenominal improvement in performance. We cannot do this 680 * as a matter of course because the object may be huge - e.g. 681 * the size might be in the gigabytes or terrabytes. 682 */ 683 if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) { 684 vm_pindex_t tscan; 685 int scanlimit; 686 int scanreset; 687 688 scanreset = object->resident_page_count / EASY_SCAN_FACTOR; 689 if (scanreset < 16) 690 scanreset = 16; 691 692 scanlimit = scanreset; 693 tscan = tstart; 694 while (tscan < tend) { 695 curgeneration = object->generation; 696 p = vm_page_lookup(object, tscan); 697 if (p == NULL || p->valid == 0 || 698 (p->queue - p->pc) == PQ_CACHE) { 699 if (--scanlimit == 0) 700 break; 701 ++tscan; 702 continue; 703 } 704 vm_page_test_dirty(p); 705 if ((p->dirty & p->valid) == 0) { 706 if (--scanlimit == 0) 707 break; 708 ++tscan; 709 continue; 710 } 711 /* 712 * If we have been asked to skip nosync pages and 713 * this is a nosync page, we can't continue. 714 */ 715 if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) { 716 if (--scanlimit == 0) 717 break; 718 ++tscan; 719 continue; 720 } 721 scanlimit = scanreset; 722 723 /* 724 * This returns 0 if it was unable to busy the first 725 * page (i.e. had to sleep). 
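vm_object_page_clean() is most commonly entered on behalf of msync(2) against a shared file mapping, and the scan strategy above can be tuned through the vm.msync_flush_flags sysctl declared near the top of this file. A hedged user-space sketch of the call that drives it (the file name is purely illustrative); FreeBSD's MAP_NOSYNC mmap flag is what marks pages PG_NOSYNC so that this path leaves them to the syncer:

#include <sys/mman.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	size_t len = 4096;
	char *p;
	int fd;

	fd = open("data.bin", O_RDWR | O_CREAT, 0644);	/* illustrative name */
	if (fd == -1)
		err(1, "open");
	if (ftruncate(fd, len) == -1)
		err(1, "ftruncate");
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");

	memset(p, 'x', len);		/* dirties the object's resident page */

	/* Synchronously push the dirty range back through the vnode pager. */
	if (msync(p, len, MS_SYNC) == -1)
		err(1, "msync");

	munmap(p, len);
	close(fd);
	return (0);
}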
726 */ 727 tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags); 728 } 729 730 /* 731 * If everything was dirty and we flushed it successfully, 732 * and the requested range is not the entire object, we 733 * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can 734 * return immediately. 735 */ 736 if (tscan >= tend && (tstart || tend < object->size)) { 737 vm_object_clear_flag(object, OBJ_CLEANING); 738 return; 739 } 740 } 741 742 /* 743 * Generally set CLEANCHK interlock and make the page read-only so 744 * we can then clear the object flags. 745 * 746 * However, if this is a nosync mmap then the object is likely to 747 * stay dirty so do not mess with the page and do not clear the 748 * object flags. 749 */ 750 clearobjflags = 1; 751 752 TAILQ_FOREACH(p, &object->memq, listq) { 753 vm_page_flag_set(p, PG_CLEANCHK); 754 if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) 755 clearobjflags = 0; 756 else 757 vm_page_protect(p, VM_PROT_READ); 758 } 759 760 if (clearobjflags && (tstart == 0) && (tend == object->size)) { 761 struct vnode *vp; 762 763 vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY); 764 if (object->type == OBJT_VNODE && 765 (vp = (struct vnode *)object->handle) != NULL) { 766 VI_LOCK(vp); 767 if (vp->v_iflag & VI_OBJDIRTY) 768 vp->v_iflag &= ~VI_OBJDIRTY; 769 VI_UNLOCK(vp); 770 } 771 } 772 773rescan: 774 curgeneration = object->generation; 775 776 for (p = TAILQ_FIRST(&object->memq); p; p = np) { 777 int n; 778 779 np = TAILQ_NEXT(p, listq); 780 781again: 782 pi = p->pindex; 783 if (((p->flags & PG_CLEANCHK) == 0) || 784 (pi < tstart) || (pi >= tend) || 785 (p->valid == 0) || 786 ((p->queue - p->pc) == PQ_CACHE)) { 787 vm_page_flag_clear(p, PG_CLEANCHK); 788 continue; 789 } 790 791 vm_page_test_dirty(p); 792 if ((p->dirty & p->valid) == 0) { 793 vm_page_flag_clear(p, PG_CLEANCHK); 794 continue; 795 } 796 797 /* 798 * If we have been asked to skip nosync pages and this is a 799 * nosync page, skip it. Note that the object flags were 800 * not cleared in this case so we do not have to set them. 801 */ 802 if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) { 803 vm_page_flag_clear(p, PG_CLEANCHK); 804 continue; 805 } 806 807 n = vm_object_page_collect_flush(object, p, 808 curgeneration, pagerflags); 809 if (n == 0) 810 goto rescan; 811 812 if (object->generation != curgeneration) 813 goto rescan; 814 815 /* 816 * Try to optimize the next page. If we can't we pick up 817 * our (random) scan where we left off. 
818 */ 819 if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) { 820 if ((p = vm_page_lookup(object, pi + n)) != NULL) 821 goto again; 822 } 823 } 824 825#if 0 826 VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc); 827#endif 828 829 vm_object_clear_flag(object, OBJ_CLEANING); 830 return; 831} 832 833static int 834vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags) 835{ 836 int runlen; 837 int s; 838 int maxf; 839 int chkb; 840 int maxb; 841 int i; 842 vm_pindex_t pi; 843 vm_page_t maf[vm_pageout_page_count]; 844 vm_page_t mab[vm_pageout_page_count]; 845 vm_page_t ma[vm_pageout_page_count]; 846 847 s = splvm(); 848 pi = p->pindex; 849 while (vm_page_sleep_busy(p, TRUE, "vpcwai")) { 850 if (object->generation != curgeneration) { 851 splx(s); 852 return(0); 853 } 854 } 855 vm_page_lock_queues(); 856 maxf = 0; 857 for(i = 1; i < vm_pageout_page_count; i++) { 858 vm_page_t tp; 859 860 if ((tp = vm_page_lookup(object, pi + i)) != NULL) { 861 if ((tp->flags & PG_BUSY) || 862 (tp->flags & PG_CLEANCHK) == 0 || 863 (tp->busy != 0)) 864 break; 865 if((tp->queue - tp->pc) == PQ_CACHE) { 866 vm_page_flag_clear(tp, PG_CLEANCHK); 867 break; 868 } 869 vm_page_test_dirty(tp); 870 if ((tp->dirty & tp->valid) == 0) { 871 vm_page_flag_clear(tp, PG_CLEANCHK); 872 break; 873 } 874 maf[ i - 1 ] = tp; 875 maxf++; 876 continue; 877 } 878 break; 879 } 880 881 maxb = 0; 882 chkb = vm_pageout_page_count - maxf; 883 if (chkb) { 884 for(i = 1; i < chkb;i++) { 885 vm_page_t tp; 886 887 if ((tp = vm_page_lookup(object, pi - i)) != NULL) { 888 if ((tp->flags & PG_BUSY) || 889 (tp->flags & PG_CLEANCHK) == 0 || 890 (tp->busy != 0)) 891 break; 892 if ((tp->queue - tp->pc) == PQ_CACHE) { 893 vm_page_flag_clear(tp, PG_CLEANCHK); 894 break; 895 } 896 vm_page_test_dirty(tp); 897 if ((tp->dirty & tp->valid) == 0) { 898 vm_page_flag_clear(tp, PG_CLEANCHK); 899 break; 900 } 901 mab[ i - 1 ] = tp; 902 maxb++; 903 continue; 904 } 905 break; 906 } 907 } 908 909 for(i = 0; i < maxb; i++) { 910 int index = (maxb - i) - 1; 911 ma[index] = mab[i]; 912 vm_page_flag_clear(ma[index], PG_CLEANCHK); 913 } 914 vm_page_flag_clear(p, PG_CLEANCHK); 915 ma[maxb] = p; 916 for(i = 0; i < maxf; i++) { 917 int index = (maxb + i) + 1; 918 ma[index] = maf[i]; 919 vm_page_flag_clear(ma[index], PG_CLEANCHK); 920 } 921 runlen = maxb + maxf + 1; 922 923 splx(s); 924 vm_pageout_flush(ma, runlen, pagerflags); 925 for (i = 0; i < runlen; i++) { 926 if (ma[i]->valid & ma[i]->dirty) { 927 vm_page_protect(ma[i], VM_PROT_READ); 928 vm_page_flag_set(ma[i], PG_CLEANCHK); 929 930 /* 931 * maxf will end up being the actual number of pages 932 * we wrote out contiguously, non-inclusive of the 933 * first page. We do not count look-behind pages. 934 */ 935 if (i >= maxb + 1 && (maxf > i - maxb - 1)) 936 maxf = i - maxb - 1; 937 } 938 } 939 vm_page_unlock_queues(); 940 return(maxf + 1); 941} 942 943#ifdef ENABLE_VFS_IOOPT 944/* 945 * Same as vm_object_pmap_copy, except range checking really 946 * works, and is meant for small sections of an object. 947 * 948 * This code protects resident pages by making them read-only 949 * and is typically called on a fork or split when a page 950 * is converted to copy-on-write. 951 * 952 * NOTE: If the page is already at VM_PROT_NONE, calling 953 * vm_page_protect will have no effect. 
954 */ 955void 956vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 957{ 958 vm_pindex_t idx; 959 vm_page_t p; 960 961 GIANT_REQUIRED; 962 963 if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0) 964 return; 965 966 for (idx = start; idx < end; idx++) { 967 p = vm_page_lookup(object, idx); 968 if (p == NULL) 969 continue; 970 vm_page_protect(p, VM_PROT_READ); 971 } 972} 973#endif 974 975/* 976 * vm_object_pmap_remove: 977 * 978 * Removes all physical pages in the specified 979 * object range from all physical maps. 980 * 981 * The object must *not* be locked. 982 */ 983void 984vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 985{ 986 vm_page_t p; 987 988 GIANT_REQUIRED; 989 if (object == NULL) 990 return; 991 TAILQ_FOREACH(p, &object->memq, listq) { 992 if (p->pindex >= start && p->pindex < end) 993 pmap_page_protect(p, VM_PROT_NONE); 994 } 995 if ((start == 0) && (object->size == end)) 996 vm_object_clear_flag(object, OBJ_WRITEABLE); 997} 998 999/* 1000 * vm_object_madvise: 1001 * 1002 * Implements the madvise function at the object/page level. 1003 * 1004 * MADV_WILLNEED (any object) 1005 * 1006 * Activate the specified pages if they are resident. 1007 * 1008 * MADV_DONTNEED (any object) 1009 * 1010 * Deactivate the specified pages if they are resident. 1011 * 1012 * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects, 1013 * OBJ_ONEMAPPING only) 1014 * 1015 * Deactivate and clean the specified pages if they are 1016 * resident. This permits the process to reuse the pages 1017 * without faulting or the kernel to reclaim the pages 1018 * without I/O. 1019 */ 1020void 1021vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise) 1022{ 1023 vm_pindex_t end, tpindex; 1024 vm_object_t tobject; 1025 vm_page_t m; 1026 1027 if (object == NULL) 1028 return; 1029 1030 vm_object_lock(object); 1031 1032 end = pindex + count; 1033 1034 /* 1035 * Locate and adjust resident pages 1036 */ 1037 for (; pindex < end; pindex += 1) { 1038relookup: 1039 tobject = object; 1040 tpindex = pindex; 1041shadowlookup: 1042 /* 1043 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages 1044 * and those pages must be OBJ_ONEMAPPING. 1045 */ 1046 if (advise == MADV_FREE) { 1047 if ((tobject->type != OBJT_DEFAULT && 1048 tobject->type != OBJT_SWAP) || 1049 (tobject->flags & OBJ_ONEMAPPING) == 0) { 1050 continue; 1051 } 1052 } 1053 1054 m = vm_page_lookup(tobject, tpindex); 1055 1056 if (m == NULL) { 1057 /* 1058 * There may be swap even if there is no backing page 1059 */ 1060 if (advise == MADV_FREE && tobject->type == OBJT_SWAP) 1061 swap_pager_freespace(tobject, tpindex, 1); 1062 1063 /* 1064 * next object 1065 */ 1066 tobject = tobject->backing_object; 1067 if (tobject == NULL) 1068 continue; 1069 tpindex += OFF_TO_IDX(tobject->backing_object_offset); 1070 goto shadowlookup; 1071 } 1072 1073 /* 1074 * If the page is busy or not in a normal active state, 1075 * we skip it. If the page is not managed there are no 1076 * page queues to mess with. Things can break if we mess 1077 * with pages in any of the below states. 
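The advice values handled by vm_object_madvise() correspond directly to the madvise(2) system call; a minimal hedged sketch from user space (illustrative, not part of this file):

#include <sys/mman.h>
#include <err.h>
#include <string.h>

int
main(void)
{
	size_t len = 16 * 4096;
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	memset(p, 1, len);				/* make the pages resident and dirty */

	if (madvise(p, len, MADV_WILLNEED) == -1)	/* activate resident pages */
		err(1, "madvise");
	if (madvise(p, len, MADV_DONTNEED) == -1)	/* deactivate them */
		err(1, "madvise");
	if (madvise(p, len, MADV_FREE) == -1)		/* mark them clean: contents are disposable */
		err(1, "madvise");

	munmap(p, len);
	return (0);
}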
1078 */ 1079 vm_page_lock_queues(); 1080 if (m->hold_count || 1081 m->wire_count || 1082 (m->flags & PG_UNMANAGED) || 1083 m->valid != VM_PAGE_BITS_ALL) { 1084 vm_page_unlock_queues(); 1085 continue; 1086 } 1087 if (vm_page_sleep_if_busy(m, TRUE, "madvpo")) 1088 goto relookup; 1089 if (advise == MADV_WILLNEED) { 1090 vm_page_activate(m); 1091 } else if (advise == MADV_DONTNEED) { 1092 vm_page_dontneed(m); 1093 } else if (advise == MADV_FREE) { 1094 /* 1095 * Mark the page clean. This will allow the page 1096 * to be freed up by the system. However, such pages 1097 * are often reused quickly by malloc()/free() 1098 * so we do not do anything that would cause 1099 * a page fault if we can help it. 1100 * 1101 * Specifically, we do not try to actually free 1102 * the page now nor do we try to put it in the 1103 * cache (which would cause a page fault on reuse). 1104 * 1105 * But we do make the page is freeable as we 1106 * can without actually taking the step of unmapping 1107 * it. 1108 */ 1109 pmap_clear_modify(m); 1110 m->dirty = 0; 1111 m->act_count = 0; 1112 vm_page_dontneed(m); 1113 } 1114 vm_page_unlock_queues(); 1115 if (advise == MADV_FREE && tobject->type == OBJT_SWAP) 1116 swap_pager_freespace(tobject, tpindex, 1); 1117 } 1118 vm_object_unlock(object); 1119} 1120 1121/* 1122 * vm_object_shadow: 1123 * 1124 * Create a new object which is backed by the 1125 * specified existing object range. The source 1126 * object reference is deallocated. 1127 * 1128 * The new object and offset into that object 1129 * are returned in the source parameters. 1130 */ 1131void 1132vm_object_shadow( 1133 vm_object_t *object, /* IN/OUT */ 1134 vm_ooffset_t *offset, /* IN/OUT */ 1135 vm_size_t length) 1136{ 1137 vm_object_t source; 1138 vm_object_t result; 1139 1140 source = *object; 1141 1142 vm_object_lock(source); 1143 /* 1144 * Don't create the new object if the old object isn't shared. 1145 */ 1146 if (source != NULL && 1147 source->ref_count == 1 && 1148 source->handle == NULL && 1149 (source->type == OBJT_DEFAULT || 1150 source->type == OBJT_SWAP)) { 1151 vm_object_unlock(source); 1152 return; 1153 } 1154 1155 /* 1156 * Allocate a new object with the given length 1157 */ 1158 result = vm_object_allocate(OBJT_DEFAULT, length); 1159 KASSERT(result != NULL, ("vm_object_shadow: no object for shadowing")); 1160 1161 /* 1162 * The new object shadows the source object, adding a reference to it. 1163 * Our caller changes his reference to point to the new object, 1164 * removing a reference to the source object. Net result: no change 1165 * of reference count. 1166 * 1167 * Try to optimize the result object's page color when shadowing 1168 * in order to maintain page coloring consistency in the combined 1169 * shadowed object. 1170 */ 1171 result->backing_object = source; 1172 if (source) { 1173 TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list); 1174 source->shadow_count++; 1175 source->generation++; 1176 if (length < source->size) 1177 length = source->size; 1178 if (length > PQ_L2_SIZE / 3 + PQ_PRIME1 || 1179 source->generation > 1) 1180 length = PQ_L2_SIZE / 3 + PQ_PRIME1; 1181 result->pg_color = (source->pg_color + 1182 length * source->generation) & PQ_L2_MASK; 1183 next_index = (result->pg_color + PQ_L2_SIZE / 3 + PQ_PRIME1) & 1184 PQ_L2_MASK; 1185 } 1186 1187 /* 1188 * Store the offset into the source object, and fix up the offset into 1189 * the new object. 
1190 */ 1191 result->backing_object_offset = *offset; 1192 1193 /* 1194 * Return the new things 1195 */ 1196 *offset = 0; 1197 *object = result; 1198 1199 vm_object_unlock(source); 1200} 1201 1202/* 1203 * vm_object_split: 1204 * 1205 * Split the pages in a map entry into a new object. This affords 1206 * easier removal of unused pages, and keeps object inheritance from 1207 * being a negative impact on memory usage. 1208 */ 1209void 1210vm_object_split(vm_map_entry_t entry) 1211{ 1212 vm_page_t m; 1213 vm_object_t orig_object, new_object, source; 1214 vm_offset_t s, e; 1215 vm_pindex_t offidxstart, offidxend; 1216 vm_size_t idx, size; 1217 vm_ooffset_t offset; 1218 1219 GIANT_REQUIRED; 1220 1221 orig_object = entry->object.vm_object; 1222 if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP) 1223 return; 1224 if (orig_object->ref_count <= 1) 1225 return; 1226 1227 offset = entry->offset; 1228 s = entry->start; 1229 e = entry->end; 1230 1231 offidxstart = OFF_TO_IDX(offset); 1232 offidxend = offidxstart + OFF_TO_IDX(e - s); 1233 size = offidxend - offidxstart; 1234 1235 new_object = vm_pager_allocate(orig_object->type, 1236 NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL); 1237 if (new_object == NULL) 1238 return; 1239 1240 source = orig_object->backing_object; 1241 if (source != NULL) { 1242 vm_object_reference(source); /* Referenced by new_object */ 1243 TAILQ_INSERT_TAIL(&source->shadow_head, 1244 new_object, shadow_list); 1245 vm_object_clear_flag(source, OBJ_ONEMAPPING); 1246 new_object->backing_object_offset = 1247 orig_object->backing_object_offset + offset; 1248 new_object->backing_object = source; 1249 source->shadow_count++; 1250 source->generation++; 1251 } 1252 for (idx = 0; idx < size; idx++) { 1253 retry: 1254 m = vm_page_lookup(orig_object, offidxstart + idx); 1255 if (m == NULL) 1256 continue; 1257 1258 /* 1259 * We must wait for pending I/O to complete before we can 1260 * rename the page. 1261 * 1262 * We do not have to VM_PROT_NONE the page as mappings should 1263 * not be changed by this operation. 1264 */ 1265 vm_page_lock_queues(); 1266 if (vm_page_sleep_if_busy(m, TRUE, "spltwt")) 1267 goto retry; 1268 1269 vm_page_busy(m); 1270 vm_page_unlock_queues(); 1271 vm_page_rename(m, new_object, idx); 1272 /* page automatically made dirty by rename and cache handled */ 1273 vm_page_busy(m); 1274 } 1275 if (orig_object->type == OBJT_SWAP) { 1276 vm_object_pip_add(orig_object, 1); 1277 /* 1278 * copy orig_object pages into new_object 1279 * and destroy unneeded pages in 1280 * shadow object. 
1281 */ 1282 swap_pager_copy(orig_object, new_object, offidxstart, 0); 1283 vm_object_pip_wakeup(orig_object); 1284 } 1285 TAILQ_FOREACH(m, &new_object->memq, listq) 1286 vm_page_wakeup(m); 1287 entry->object.vm_object = new_object; 1288 entry->offset = 0LL; 1289 vm_object_deallocate(orig_object); 1290} 1291 1292#define OBSC_TEST_ALL_SHADOWED 0x0001 1293#define OBSC_COLLAPSE_NOWAIT 0x0002 1294#define OBSC_COLLAPSE_WAIT 0x0004 1295 1296static __inline int 1297vm_object_backing_scan(vm_object_t object, int op) 1298{ 1299 int s; 1300 int r = 1; 1301 vm_page_t p; 1302 vm_object_t backing_object; 1303 vm_pindex_t backing_offset_index; 1304 1305 s = splvm(); 1306 GIANT_REQUIRED; 1307 1308 backing_object = object->backing_object; 1309 backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 1310 1311 /* 1312 * Initial conditions 1313 */ 1314 if (op & OBSC_TEST_ALL_SHADOWED) { 1315 /* 1316 * We do not want to have to test for the existence of 1317 * swap pages in the backing object. XXX but with the 1318 * new swapper this would be pretty easy to do. 1319 * 1320 * XXX what about anonymous MAP_SHARED memory that hasn't 1321 * been ZFOD faulted yet? If we do not test for this, the 1322 * shadow test may succeed! XXX 1323 */ 1324 if (backing_object->type != OBJT_DEFAULT) { 1325 splx(s); 1326 return (0); 1327 } 1328 } 1329 if (op & OBSC_COLLAPSE_WAIT) { 1330 vm_object_set_flag(backing_object, OBJ_DEAD); 1331 } 1332 1333 /* 1334 * Our scan 1335 */ 1336 p = TAILQ_FIRST(&backing_object->memq); 1337 while (p) { 1338 vm_page_t next = TAILQ_NEXT(p, listq); 1339 vm_pindex_t new_pindex = p->pindex - backing_offset_index; 1340 1341 if (op & OBSC_TEST_ALL_SHADOWED) { 1342 vm_page_t pp; 1343 1344 /* 1345 * Ignore pages outside the parent object's range 1346 * and outside the parent object's mapping of the 1347 * backing object. 1348 * 1349 * note that we do not busy the backing object's 1350 * page. 1351 */ 1352 if ( 1353 p->pindex < backing_offset_index || 1354 new_pindex >= object->size 1355 ) { 1356 p = next; 1357 continue; 1358 } 1359 1360 /* 1361 * See if the parent has the page or if the parent's 1362 * object pager has the page. If the parent has the 1363 * page but the page is not valid, the parent's 1364 * object pager must have the page. 1365 * 1366 * If this fails, the parent does not completely shadow 1367 * the object and we might as well give up now. 1368 */ 1369 1370 pp = vm_page_lookup(object, new_pindex); 1371 if ( 1372 (pp == NULL || pp->valid == 0) && 1373 !vm_pager_has_page(object, new_pindex, NULL, NULL) 1374 ) { 1375 r = 0; 1376 break; 1377 } 1378 } 1379 1380 /* 1381 * Check for busy page 1382 */ 1383 if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) { 1384 vm_page_t pp; 1385 1386 vm_page_lock_queues(); 1387 if (op & OBSC_COLLAPSE_NOWAIT) { 1388 if ((p->flags & PG_BUSY) || 1389 !p->valid || 1390 p->hold_count || 1391 p->wire_count || 1392 p->busy) { 1393 vm_page_unlock_queues(); 1394 p = next; 1395 continue; 1396 } 1397 } else if (op & OBSC_COLLAPSE_WAIT) { 1398 if (vm_page_sleep_if_busy(p, TRUE, "vmocol")) { 1399 /* 1400 * If we slept, anything could have 1401 * happened. Since the object is 1402 * marked dead, the backing offset 1403 * should not have changed so we 1404 * just restart our scan. 
1405 */ 1406 p = TAILQ_FIRST(&backing_object->memq); 1407 continue; 1408 } 1409 } 1410 1411 /* 1412 * Busy the page 1413 */ 1414 vm_page_busy(p); 1415 vm_page_unlock_queues(); 1416 1417 KASSERT( 1418 p->object == backing_object, 1419 ("vm_object_qcollapse(): object mismatch") 1420 ); 1421 1422 /* 1423 * Destroy any associated swap 1424 */ 1425 if (backing_object->type == OBJT_SWAP) { 1426 swap_pager_freespace( 1427 backing_object, 1428 p->pindex, 1429 1 1430 ); 1431 } 1432 1433 if ( 1434 p->pindex < backing_offset_index || 1435 new_pindex >= object->size 1436 ) { 1437 /* 1438 * Page is out of the parent object's range, we 1439 * can simply destroy it. 1440 */ 1441 vm_page_lock_queues(); 1442 pmap_page_protect(p, VM_PROT_NONE); 1443 vm_page_free(p); 1444 vm_page_unlock_queues(); 1445 p = next; 1446 continue; 1447 } 1448 1449 pp = vm_page_lookup(object, new_pindex); 1450 if ( 1451 pp != NULL || 1452 vm_pager_has_page(object, new_pindex, NULL, NULL) 1453 ) { 1454 /* 1455 * page already exists in parent OR swap exists 1456 * for this location in the parent. Destroy 1457 * the original page from the backing object. 1458 * 1459 * Leave the parent's page alone 1460 */ 1461 vm_page_lock_queues(); 1462 pmap_page_protect(p, VM_PROT_NONE); 1463 vm_page_free(p); 1464 vm_page_unlock_queues(); 1465 p = next; 1466 continue; 1467 } 1468 1469 /* 1470 * Page does not exist in parent, rename the 1471 * page from the backing object to the main object. 1472 * 1473 * If the page was mapped to a process, it can remain 1474 * mapped through the rename. 1475 */ 1476 vm_page_rename(p, object, new_pindex); 1477 /* page automatically made dirty by rename */ 1478 } 1479 p = next; 1480 } 1481 splx(s); 1482 return (r); 1483} 1484 1485 1486/* 1487 * this version of collapse allows the operation to occur earlier and 1488 * when paging_in_progress is true for an object... This is not a complete 1489 * operation, but should plug 99.9% of the rest of the leaks. 1490 */ 1491static void 1492vm_object_qcollapse(vm_object_t object) 1493{ 1494 vm_object_t backing_object = object->backing_object; 1495 1496 GIANT_REQUIRED; 1497 1498 if (backing_object->ref_count != 1) 1499 return; 1500 1501 backing_object->ref_count += 2; 1502 1503 vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT); 1504 1505 backing_object->ref_count -= 2; 1506} 1507 1508/* 1509 * vm_object_collapse: 1510 * 1511 * Collapse an object with the object backing it. 1512 * Pages in the backing object are moved into the 1513 * parent, and the backing object is deallocated. 1514 */ 1515void 1516vm_object_collapse(vm_object_t object) 1517{ 1518 GIANT_REQUIRED; 1519 1520 while (TRUE) { 1521 vm_object_t backing_object; 1522 1523 /* 1524 * Verify that the conditions are right for collapse: 1525 * 1526 * The object exists and the backing object exists. 1527 */ 1528 if (object == NULL) 1529 break; 1530 1531 if ((backing_object = object->backing_object) == NULL) 1532 break; 1533 1534 /* 1535 * we check the backing object first, because it is most likely 1536 * not collapsable. 
1537 */ 1538 if (backing_object->handle != NULL || 1539 (backing_object->type != OBJT_DEFAULT && 1540 backing_object->type != OBJT_SWAP) || 1541 (backing_object->flags & OBJ_DEAD) || 1542 object->handle != NULL || 1543 (object->type != OBJT_DEFAULT && 1544 object->type != OBJT_SWAP) || 1545 (object->flags & OBJ_DEAD)) { 1546 break; 1547 } 1548 1549 if ( 1550 object->paging_in_progress != 0 || 1551 backing_object->paging_in_progress != 0 1552 ) { 1553 vm_object_qcollapse(object); 1554 break; 1555 } 1556 1557 /* 1558 * We know that we can either collapse the backing object (if 1559 * the parent is the only reference to it) or (perhaps) have 1560 * the parent bypass the object if the parent happens to shadow 1561 * all the resident pages in the entire backing object. 1562 * 1563 * This is ignoring pager-backed pages such as swap pages. 1564 * vm_object_backing_scan fails the shadowing test in this 1565 * case. 1566 */ 1567 if (backing_object->ref_count == 1) { 1568 /* 1569 * If there is exactly one reference to the backing 1570 * object, we can collapse it into the parent. 1571 */ 1572 vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT); 1573 1574 /* 1575 * Move the pager from backing_object to object. 1576 */ 1577 if (backing_object->type == OBJT_SWAP) { 1578 vm_object_pip_add(backing_object, 1); 1579 1580 /* 1581 * scrap the paging_offset junk and do a 1582 * discrete copy. This also removes major 1583 * assumptions about how the swap-pager 1584 * works from where it doesn't belong. The 1585 * new swapper is able to optimize the 1586 * destroy-source case. 1587 */ 1588 vm_object_pip_add(object, 1); 1589 swap_pager_copy( 1590 backing_object, 1591 object, 1592 OFF_TO_IDX(object->backing_object_offset), TRUE); 1593 vm_object_pip_wakeup(object); 1594 1595 vm_object_pip_wakeup(backing_object); 1596 } 1597 /* 1598 * Object now shadows whatever backing_object did. 1599 * Note that the reference to 1600 * backing_object->backing_object moves from within 1601 * backing_object to within object. 1602 */ 1603 TAILQ_REMOVE( 1604 &object->backing_object->shadow_head, 1605 object, 1606 shadow_list 1607 ); 1608 object->backing_object->shadow_count--; 1609 object->backing_object->generation++; 1610 if (backing_object->backing_object) { 1611 TAILQ_REMOVE( 1612 &backing_object->backing_object->shadow_head, 1613 backing_object, 1614 shadow_list 1615 ); 1616 backing_object->backing_object->shadow_count--; 1617 backing_object->backing_object->generation++; 1618 } 1619 object->backing_object = backing_object->backing_object; 1620 if (object->backing_object) { 1621 TAILQ_INSERT_TAIL( 1622 &object->backing_object->shadow_head, 1623 object, 1624 shadow_list 1625 ); 1626 object->backing_object->shadow_count++; 1627 object->backing_object->generation++; 1628 } 1629 1630 object->backing_object_offset += 1631 backing_object->backing_object_offset; 1632 1633 /* 1634 * Discard backing_object. 1635 * 1636 * Since the backing object has no pages, no pager left, 1637 * and no object references within it, all that is 1638 * necessary is to dispose of it. 
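An illustrative before/after sketch (not from the source) of the two outcomes decided in this loop:

	collapse (backing_object->ref_count == 1):
	    before:  object --> backing_object --> X
	    after:   object ------------------- -> X
	    backing_object's pages are renamed into object (duplicates freed),
	    its swap is moved by swap_pager_copy(), and the now-empty object
	    is returned to obj_zone.

	bypass (ref_count > 1, but object shadows every resident page):
	    before:  object --> backing_object --> X
	    after:   object ------------------- -> X
	    backing_object keeps its other references and merely loses one.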
1639 */ 1640 KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object)); 1641 KASSERT(TAILQ_FIRST(&backing_object->memq) == NULL, ("backing_object %p somehow has left over pages during collapse!", backing_object)); 1642 1643 mtx_lock(&vm_object_list_mtx); 1644 TAILQ_REMOVE( 1645 &vm_object_list, 1646 backing_object, 1647 object_list 1648 ); 1649 mtx_unlock(&vm_object_list_mtx); 1650 1651 uma_zfree(obj_zone, backing_object); 1652 1653 object_collapses++; 1654 } else { 1655 vm_object_t new_backing_object; 1656 1657 /* 1658 * If we do not entirely shadow the backing object, 1659 * there is nothing we can do so we give up. 1660 */ 1661 if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) { 1662 break; 1663 } 1664 1665 /* 1666 * Make the parent shadow the next object in the 1667 * chain. Deallocating backing_object will not remove 1668 * it, since its reference count is at least 2. 1669 */ 1670 TAILQ_REMOVE( 1671 &backing_object->shadow_head, 1672 object, 1673 shadow_list 1674 ); 1675 backing_object->shadow_count--; 1676 backing_object->generation++; 1677 1678 new_backing_object = backing_object->backing_object; 1679 if ((object->backing_object = new_backing_object) != NULL) { 1680 vm_object_reference(new_backing_object); 1681 TAILQ_INSERT_TAIL( 1682 &new_backing_object->shadow_head, 1683 object, 1684 shadow_list 1685 ); 1686 new_backing_object->shadow_count++; 1687 new_backing_object->generation++; 1688 object->backing_object_offset += 1689 backing_object->backing_object_offset; 1690 } 1691 1692 /* 1693 * Drop the reference count on backing_object. Since 1694 * its ref_count was at least 2, it will not vanish; 1695 * so we don't need to call vm_object_deallocate, but 1696 * we do anyway. 1697 */ 1698 vm_object_deallocate(backing_object); 1699 object_bypasses++; 1700 } 1701 1702 /* 1703 * Try again with this object's new backing object. 1704 */ 1705 } 1706} 1707 1708/* 1709 * vm_object_page_remove: [internal] 1710 * 1711 * Removes all physical pages in the specified 1712 * object range from the object's list of pages. 1713 * 1714 * The object must be locked. 1715 */ 1716void 1717vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, boolean_t clean_only) 1718{ 1719 vm_page_t p, next; 1720 vm_pindex_t size; 1721 int all; 1722 1723 if (object == NULL) 1724 return; 1725 1726 mtx_lock(&Giant); 1727 if (object->resident_page_count == 0) { 1728 mtx_unlock(&Giant); 1729 return; 1730 } 1731 all = ((end == 0) && (start == 0)); 1732 1733 /* 1734 * Since physically-backed objects do not use managed pages, we can't 1735 * remove pages from the object (we must instead remove the page 1736 * references, and then destroy the object). 
1737 */ 1738 KASSERT(object->type != OBJT_PHYS, ("attempt to remove pages from a physical object")); 1739 1740 vm_object_pip_add(object, 1); 1741again: 1742 vm_page_lock_queues(); 1743 size = end - start; 1744 if (all || size > object->resident_page_count / 4) { 1745 for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) { 1746 next = TAILQ_NEXT(p, listq); 1747 if (all || ((start <= p->pindex) && (p->pindex < end))) { 1748 if (p->wire_count != 0) { 1749 pmap_page_protect(p, VM_PROT_NONE); 1750 if (!clean_only) 1751 p->valid = 0; 1752 continue; 1753 } 1754 1755 /* 1756 * The busy flags are only cleared at 1757 * interrupt -- minimize the spl transitions 1758 */ 1759 if (vm_page_sleep_if_busy(p, TRUE, "vmopar")) 1760 goto again; 1761 1762 if (clean_only && p->valid) { 1763 vm_page_test_dirty(p); 1764 if (p->valid & p->dirty) 1765 continue; 1766 } 1767 vm_page_busy(p); 1768 pmap_page_protect(p, VM_PROT_NONE); 1769 vm_page_free(p); 1770 } 1771 } 1772 } else { 1773 while (size > 0) { 1774 if ((p = vm_page_lookup(object, start)) != NULL) { 1775 if (p->wire_count != 0) { 1776 pmap_page_protect(p, VM_PROT_NONE); 1777 if (!clean_only) 1778 p->valid = 0; 1779 start += 1; 1780 size -= 1; 1781 continue; 1782 } 1783 1784 /* 1785 * The busy flags are only cleared at 1786 * interrupt -- minimize the spl transitions 1787 */ 1788 if (vm_page_sleep_if_busy(p, TRUE, "vmopar")) 1789 goto again; 1790 1791 if (clean_only && p->valid) { 1792 vm_page_test_dirty(p); 1793 if (p->valid & p->dirty) { 1794 start += 1; 1795 size -= 1; 1796 continue; 1797 } 1798 } 1799 vm_page_busy(p); 1800 pmap_page_protect(p, VM_PROT_NONE); 1801 vm_page_free(p); 1802 } 1803 start += 1; 1804 size -= 1; 1805 } 1806 } 1807 vm_page_unlock_queues(); 1808 vm_object_pip_wakeup(object); 1809 mtx_unlock(&Giant); 1810} 1811 1812/* 1813 * Routine: vm_object_coalesce 1814 * Function: Coalesces two objects backing up adjoining 1815 * regions of memory into a single object. 1816 * 1817 * returns TRUE if objects were combined. 1818 * 1819 * NOTE: Only works at the moment if the second object is NULL - 1820 * if it's not, which object do we lock first? 1821 * 1822 * Parameters: 1823 * prev_object First object to coalesce 1824 * prev_offset Offset into prev_object 1825 * next_object Second object into coalesce 1826 * next_offset Offset into next_object 1827 * 1828 * prev_size Size of reference to prev_object 1829 * next_size Size of reference to next_object 1830 * 1831 * Conditions: 1832 * The object must *not* be locked. 1833 */ 1834boolean_t 1835vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex, 1836 vm_size_t prev_size, vm_size_t next_size) 1837{ 1838 vm_pindex_t next_pindex; 1839 1840 if (prev_object == NULL) 1841 return (TRUE); 1842 vm_object_lock(prev_object); 1843 if (prev_object->type != OBJT_DEFAULT && 1844 prev_object->type != OBJT_SWAP) { 1845 vm_object_unlock(prev_object); 1846 return (FALSE); 1847 } 1848 1849 /* 1850 * Try to collapse the object first 1851 */ 1852 vm_object_collapse(prev_object); 1853 1854 /* 1855 * Can't coalesce if: . more than one reference . paged out . shadows 1856 * another object . 
has a copy elsewhere (any of which mean that the 1857 * pages not mapped to prev_entry may be in use anyway) 1858 */ 1859 if (prev_object->backing_object != NULL) { 1860 vm_object_unlock(prev_object); 1861 return (FALSE); 1862 } 1863 1864 prev_size >>= PAGE_SHIFT; 1865 next_size >>= PAGE_SHIFT; 1866 next_pindex = prev_pindex + prev_size; 1867 1868 if ((prev_object->ref_count > 1) && 1869 (prev_object->size != next_pindex)) { 1870 vm_object_unlock(prev_object); 1871 return (FALSE); 1872 } 1873 1874 /* 1875 * Remove any pages that may still be in the object from a previous 1876 * deallocation. 1877 */ 1878 if (next_pindex < prev_object->size) { 1879 vm_object_page_remove(prev_object, 1880 next_pindex, 1881 next_pindex + next_size, FALSE); 1882 if (prev_object->type == OBJT_SWAP) 1883 swap_pager_freespace(prev_object, 1884 next_pindex, next_size); 1885 } 1886 1887 /* 1888 * Extend the object if necessary. 1889 */ 1890 if (next_pindex + next_size > prev_object->size) 1891 prev_object->size = next_pindex + next_size; 1892 1893 vm_object_unlock(prev_object); 1894 return (TRUE); 1895} 1896 1897void 1898vm_object_set_writeable_dirty(vm_object_t object) 1899{ 1900 struct vnode *vp; 1901 1902 vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY); 1903 if (object->type == OBJT_VNODE && 1904 (vp = (struct vnode *)object->handle) != NULL) { 1905 VI_LOCK(vp); 1906 if ((vp->v_iflag & VI_OBJDIRTY) == 0) 1907 vp->v_iflag |= VI_OBJDIRTY; 1908 VI_UNLOCK(vp); 1909 } 1910} 1911 1912#ifdef ENABLE_VFS_IOOPT 1913/* 1914 * Experimental support for zero-copy I/O 1915 * 1916 * Performs the copy_on_write operations necessary to allow the virtual copies 1917 * into user space to work. This has to be called for write(2) system calls 1918 * from other processes, file unlinking, and file size shrinkage. 
1919 */ 1920void 1921vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa) 1922{ 1923 int rv; 1924 vm_object_t robject; 1925 vm_pindex_t idx; 1926 1927 GIANT_REQUIRED; 1928 if ((object == NULL) || 1929 ((object->flags & OBJ_OPT) == 0)) 1930 return; 1931 1932 if (object->shadow_count > object->ref_count) 1933 panic("vm_freeze_copyopts: sc > rc"); 1934 1935 while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) { 1936 vm_pindex_t bo_pindex; 1937 vm_page_t m_in, m_out; 1938 1939 bo_pindex = OFF_TO_IDX(robject->backing_object_offset); 1940 1941 vm_object_reference(robject); 1942 1943 vm_object_pip_wait(robject, "objfrz"); 1944 1945 if (robject->ref_count == 1) { 1946 vm_object_deallocate(robject); 1947 continue; 1948 } 1949 1950 vm_object_pip_add(robject, 1); 1951 1952 for (idx = 0; idx < robject->size; idx++) { 1953 1954 m_out = vm_page_grab(robject, idx, 1955 VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 1956 1957 if (m_out->valid == 0) { 1958 m_in = vm_page_grab(object, bo_pindex + idx, 1959 VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 1960 if (m_in->valid == 0) { 1961 rv = vm_pager_get_pages(object, &m_in, 1, 0); 1962 if (rv != VM_PAGER_OK) { 1963 printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex); 1964 continue; 1965 } 1966 vm_page_lock_queues(); 1967 vm_page_deactivate(m_in); 1968 vm_page_unlock_queues(); 1969 } 1970 1971 pmap_page_protect(m_in, VM_PROT_NONE); 1972 pmap_copy_page(m_in, m_out); 1973 m_out->valid = m_in->valid; 1974 vm_page_dirty(m_out); 1975 vm_page_lock_queues(); 1976 vm_page_activate(m_out); 1977 vm_page_unlock_queues(); 1978 vm_page_wakeup(m_in); 1979 } 1980 vm_page_wakeup(m_out); 1981 } 1982 1983 object->shadow_count--; 1984 object->ref_count--; 1985 TAILQ_REMOVE(&object->shadow_head, robject, shadow_list); 1986 robject->backing_object = NULL; 1987 robject->backing_object_offset = 0; 1988 1989 vm_object_pip_wakeup(robject); 1990 vm_object_deallocate(robject); 1991 } 1992 1993 vm_object_clear_flag(object, OBJ_OPT); 1994} 1995#endif 1996 1997#include "opt_ddb.h" 1998#ifdef DDB 1999#include <sys/kernel.h> 2000 2001#include <sys/cons.h> 2002 2003#include <ddb/ddb.h> 2004 2005static int 2006_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) 2007{ 2008 vm_map_t tmpm; 2009 vm_map_entry_t tmpe; 2010 vm_object_t obj; 2011 int entcount; 2012 2013 if (map == 0) 2014 return 0; 2015 2016 if (entry == 0) { 2017 tmpe = map->header.next; 2018 entcount = map->nentries; 2019 while (entcount-- && (tmpe != &map->header)) { 2020 if (_vm_object_in_map(map, object, tmpe)) { 2021 return 1; 2022 } 2023 tmpe = tmpe->next; 2024 } 2025 } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 2026 tmpm = entry->object.sub_map; 2027 tmpe = tmpm->header.next; 2028 entcount = tmpm->nentries; 2029 while (entcount-- && tmpe != &tmpm->header) { 2030 if (_vm_object_in_map(tmpm, object, tmpe)) { 2031 return 1; 2032 } 2033 tmpe = tmpe->next; 2034 } 2035 } else if ((obj = entry->object.vm_object) != NULL) { 2036 for (; obj; obj = obj->backing_object) 2037 if (obj == object) { 2038 return 1; 2039 } 2040 } 2041 return 0; 2042} 2043 2044static int 2045vm_object_in_map(vm_object_t object) 2046{ 2047 struct proc *p; 2048 2049 /* sx_slock(&allproc_lock); */ 2050 LIST_FOREACH(p, &allproc, p_list) { 2051 if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) 2052 continue; 2053 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { 2054 /* sx_sunlock(&allproc_lock); */ 2055 return 1; 2056 } 2057 } 2058 /* sx_sunlock(&allproc_lock); */ 2059 if 
(_vm_object_in_map(kernel_map, object, 0)) 2060 return 1; 2061 if (_vm_object_in_map(kmem_map, object, 0)) 2062 return 1; 2063 if (_vm_object_in_map(pager_map, object, 0)) 2064 return 1; 2065 if (_vm_object_in_map(buffer_map, object, 0)) 2066 return 1; 2067 return 0; 2068} 2069 2070DB_SHOW_COMMAND(vmochk, vm_object_check) 2071{ 2072 vm_object_t object; 2073 2074 /* 2075 * make sure that internal objs are in a map somewhere 2076 * and none have zero ref counts. 2077 */ 2078 TAILQ_FOREACH(object, &vm_object_list, object_list) { 2079 if (object->handle == NULL && 2080 (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { 2081 if (object->ref_count == 0) { 2082 db_printf("vmochk: internal obj has zero ref count: %ld\n", 2083 (long)object->size); 2084 } 2085 if (!vm_object_in_map(object)) { 2086 db_printf( 2087 "vmochk: internal obj is not in a map: " 2088 "ref: %d, size: %lu: 0x%lx, backing_object: %p\n", 2089 object->ref_count, (u_long)object->size, 2090 (u_long)object->size, 2091 (void *)object->backing_object); 2092 } 2093 } 2094 } 2095} 2096 2097/* 2098 * vm_object_print: [ debug ] 2099 */ 2100DB_SHOW_COMMAND(object, vm_object_print_static) 2101{ 2102 /* XXX convert args. */ 2103 vm_object_t object = (vm_object_t)addr; 2104 boolean_t full = have_addr; 2105 2106 vm_page_t p; 2107 2108 /* XXX count is an (unused) arg. Avoid shadowing it. */ 2109#define count was_count 2110 2111 int count; 2112 2113 if (object == NULL) 2114 return; 2115 2116 db_iprintf( 2117 "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x\n", 2118 object, (int)object->type, (uintmax_t)object->size, 2119 object->resident_page_count, object->ref_count, object->flags); 2120 db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n", 2121 object->shadow_count, 2122 object->backing_object ? object->backing_object->ref_count : 0, 2123 object->backing_object, (uintmax_t)object->backing_object_offset); 2124 2125 if (!full) 2126 return; 2127 2128 db_indent += 2; 2129 count = 0; 2130 TAILQ_FOREACH(p, &object->memq, listq) { 2131 if (count == 0) 2132 db_iprintf("memory:="); 2133 else if (count == 6) { 2134 db_printf("\n"); 2135 db_iprintf(" ..."); 2136 count = 0; 2137 } else 2138 db_printf(","); 2139 count++; 2140 2141 db_printf("(off=0x%jx,page=0x%jx)", 2142 (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p)); 2143 } 2144 if (count != 0) 2145 db_printf("\n"); 2146 db_indent -= 2; 2147} 2148 2149/* XXX. */ 2150#undef count 2151 2152/* XXX need this non-static entry for calling from vm_map_print. 
*/ 2153void 2154vm_object_print( 2155 /* db_expr_t */ long addr, 2156 boolean_t have_addr, 2157 /* db_expr_t */ long count, 2158 char *modif) 2159{ 2160 vm_object_print_static(addr, have_addr, count, modif); 2161} 2162 2163DB_SHOW_COMMAND(vmopag, vm_object_print_pages) 2164{ 2165 vm_object_t object; 2166 int nl = 0; 2167 int c; 2168 2169 TAILQ_FOREACH(object, &vm_object_list, object_list) { 2170 vm_pindex_t idx, fidx; 2171 vm_pindex_t osize; 2172 vm_offset_t pa = -1, padiff; 2173 int rcount; 2174 vm_page_t m; 2175 2176 db_printf("new object: %p\n", (void *)object); 2177 if (nl > 18) { 2178 c = cngetc(); 2179 if (c != ' ') 2180 return; 2181 nl = 0; 2182 } 2183 nl++; 2184 rcount = 0; 2185 fidx = 0; 2186 osize = object->size; 2187 if (osize > 128) 2188 osize = 128; 2189 for (idx = 0; idx < osize; idx++) { 2190 m = vm_page_lookup(object, idx); 2191 if (m == NULL) { 2192 if (rcount) { 2193 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2194 (long)fidx, rcount, (long)pa); 2195 if (nl > 18) { 2196 c = cngetc(); 2197 if (c != ' ') 2198 return; 2199 nl = 0; 2200 } 2201 nl++; 2202 rcount = 0; 2203 } 2204 continue; 2205 } 2206 2207 2208 if (rcount && 2209 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { 2210 ++rcount; 2211 continue; 2212 } 2213 if (rcount) { 2214 padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m); 2215 padiff >>= PAGE_SHIFT; 2216 padiff &= PQ_L2_MASK; 2217 if (padiff == 0) { 2218 pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE; 2219 ++rcount; 2220 continue; 2221 } 2222 db_printf(" index(%ld)run(%d)pa(0x%lx)", 2223 (long)fidx, rcount, (long)pa); 2224 db_printf("pd(%ld)\n", (long)padiff); 2225 if (nl > 18) { 2226 c = cngetc(); 2227 if (c != ' ') 2228 return; 2229 nl = 0; 2230 } 2231 nl++; 2232 } 2233 fidx = idx; 2234 pa = VM_PAGE_TO_PHYS(m); 2235 rcount = 1; 2236 } 2237 if (rcount) { 2238 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2239 (long)fidx, rcount, (long)pa); 2240 if (nl > 18) { 2241 c = cngetc(); 2242 if (c != ' ') 2243 return; 2244 nl = 0; 2245 } 2246 nl++; 2247 } 2248 } 2249} 2250#endif /* DDB */ 2251
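For completeness, the DB_SHOW_COMMAND entries above are invoked from the in-kernel debugger prompt; an illustrative session outline (the object address shown is made up):

	db> show vmochk
	db> show object 0xc1234560
	db> show vmopag

Giving "show object" an address selects that object and, because have_addr is used as the "full" flag, also dumps its resident page list.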