vm_object.c revision 124084
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_object.c 124084 2004-01-02 19:57:45Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#define EASY_SCAN_FACTOR	8

#define MSYNC_FLUSH_HARDSEQ	0x01
#define MSYNC_FLUSH_SOFTSEQ	0x02

/*
 * msync / VM object flushing optimizations
 */
static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ;
SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags,
    CTLFLAG_RW, &msync_flush_flags, 0, "");

static void	vm_object_qcollapse(vm_object_t object);
static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
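 *
 *	As an illustration, after a copy-on-write fault a shadow chain
 *	such as
 *
 *		map entry -> shadow object -> backing object
 *
 *	can exist; lookups walk the chain from front to back, as
 *	vm_object_madvise() and vm_object_backing_scan() below do.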
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;

static long object_collapses;
static long object_bypasses;
static int next_index;
static uma_zone_t obj_zone;
#define VM_OBJECTS_INIT 256

static void vm_object_zinit(void *mem, int size);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages",
	    object));
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
}
#endif

static void
vm_object_zinit(void *mem, int size)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	bzero(&object->mtx, sizeof(object->mtx));
	VM_OBJECT_LOCK_INIT(object);

	/* These are true for any object that has been freed */
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
}

void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{
	int incr;

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->root = NULL;
	object->type = type;
	object->size = size;
	object->generation = 1;
	object->ref_count = 1;
	object->flags = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		object->flags = OBJ_ONEMAPPING;
	if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	do
		object->pg_color = next_index;
	while (!atomic_cmpset_int(&next_index, object->pg_color,
	    (object->pg_color + incr) & PQ_L2_MASK));
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	VM_OBJECT_LOCK_INIT(&kernel_object_store);
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);

	/*
	 * The kmem object's mutex is given a unique name, instead of
	 * "vm object", to avoid false reports of lock-order reversal
	 * with a system map mutex.
	 */
	mtx_init(VM_OBJECT_MTX(kmem_object), "kmem object", NULL, MTX_DEF);
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);

	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
	uma_prealloc(obj_zone, VM_OBJECTS_INIT);
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->flags &= ~bits;
}

void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
	}
}

/*
 *	vm_object_allocate_wait
 *
 *	Return a new object with the given size, and give the user the
 *	option of waiting for it to complete or failing if the needed
 *	memory isn't available.
 */
vm_object_t
vm_object_allocate_wait(objtype_t type, vm_pindex_t size, int flags)
{
	vm_object_t result;

	result = (vm_object_t) uma_zalloc(obj_zone, flags);

	if (result != NULL)
		_vm_object_allocate(type, size, result);

	return (result);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	return (vm_object_allocate_wait(type, size, M_WAITOK));
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
	struct vnode *vp;
	int flags;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		VI_LOCK(vp);
		VM_OBJECT_UNLOCK(object);
		for (flags = LK_INTERLOCK; vget(vp, flags, curthread);
		    flags = 0)
			printf("vm_object_reference: delay in vget\n");
	} else
		VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
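 *
 *	A typical caller pattern (illustrative sketch only):
 *
 *		VM_OBJECT_LOCK(object);
 *		vm_object_reference_locked(object);
 *		VM_OBJECT_UNLOCK(object);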
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT((object->flags & OBJ_DEAD) == 0,
	    ("vm_object_reference_locked: dead object referenced"));
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	GIANT_REQUIRED;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		mp_fixme("Unlocked vflag access.");
		vp->v_vflag &= ~VV_TEXT;
	}
	VM_OBJECT_UNLOCK(object);
	/*
	 * vrele may need a vop lock
	 */
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;

	if (object != kmem_object)
		mtx_lock(&Giant);
	while (object != NULL) {
		VM_OBJECT_LOCK(object);
		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			goto done;
		}

		KASSERT(object->ref_count != 0,
		    ("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_UNLOCK(object);
			goto done;
		} else if (object->ref_count == 1) {
			if (object->shadow_count == 0) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				    object->ref_count,
				    object->shadow_count));
				if (!VM_OBJECT_TRYLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
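					 * The lock order here is object,
					 * then robject; a failed trylock
					 * means another thread holds
					 * robject, so restore the
					 * reference we dropped, release
					 * our lock, and retry the loop.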
					 */
					object->ref_count++;
					VM_OBJECT_UNLOCK(object);
					continue;
				}
				if ((robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_UNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						VM_OBJECT_LOCK(object);
						goto retry;
					} else if (object->paging_in_progress) {
						VM_OBJECT_UNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						msleep(object,
						    VM_OBJECT_MTX(object),
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_LOCK(robject);
						VM_OBJECT_LOCK(object);
						goto retry;
					}
					VM_OBJECT_UNLOCK(object);
					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_UNLOCK(object);
					continue;
				}
				VM_OBJECT_UNLOCK(robject);
			}
			VM_OBJECT_UNLOCK(object);
			goto done;
		}
doterm:
		temp = object->backing_object;
		if (temp != NULL) {
			VM_OBJECT_LOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			temp->generation++;
			VM_OBJECT_UNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_UNLOCK(object);
		object = temp;
	}
done:
	if (object != kmem_object)
		mtx_unlock(&Giant);
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
	vm_page_t p;
	int s;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(object);

		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);

		VM_OBJECT_LOCK(object);
	}

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	s = splvm();
	vm_page_lock_queues();
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		KASSERT(!p->busy && (p->flags & PG_BUSY) == 0,
		    ("vm_object_terminate: freeing busy page %p "
		    "p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
		if (p->wire_count == 0) {
			vm_page_busy(p);
			vm_page_free(p);
			cnt.v_pfree++;
		} else {
			vm_page_busy(p);
			vm_page_remove(p);
		}
	}
	vm_page_unlock_queues();
	splx(s);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_UNLOCK(object);

	/*
	 * Remove the object from the global object list.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);

	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.  If NOSYNC is set then do not
 *	write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags)
{
	vm_page_t p, np;
	vm_pindex_t tstart, tend;
	vm_pindex_t pi;
	int clearobjflags;
	int pagerflags;
	int curgeneration;

	GIANT_REQUIRED;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vm_object_set_flag(object, OBJ_CLEANING);

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}

	vm_page_lock_queues();
	/*
	 * If the caller is smart and only msync()s a range he knows is
	 * dirty, we may be able to avoid an object scan.  This results in
	 * a phenomenal improvement in performance.  We cannot do this
	 * as a matter of course because the object may be huge - e.g.
	 * the size might be in the gigabytes or terabytes.
	 */
	if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) {
		vm_pindex_t tscan;
		int scanlimit;
		int scanreset;

		scanreset = object->resident_page_count / EASY_SCAN_FACTOR;
		if (scanreset < 16)
			scanreset = 16;
		pagerflags |= VM_PAGER_IGNORE_CLEANCHK;

		scanlimit = scanreset;
		tscan = tstart;
		while (tscan < tend) {
			curgeneration = object->generation;
			p = vm_page_lookup(object, tscan);
			if (p == NULL || p->valid == 0 ||
			    (p->queue - p->pc) == PQ_CACHE) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			vm_page_test_dirty(p);
			if ((p->dirty & p->valid) == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			/*
			 * If we have been asked to skip nosync pages and
			 * this is a nosync page, we can't continue.
			 */
			if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			scanlimit = scanreset;

			/*
			 * This returns 0 if it was unable to busy the first
			 * page (i.e. had to sleep).
			 */
			tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags);
		}

		/*
		 * If everything was dirty and we flushed it successfully,
		 * and the requested range is not the entire object, we
		 * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can
		 * return immediately.
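		 *
		 * (If the range did cover the whole object, we fall
		 * through instead, so that OBJ_WRITEABLE and
		 * OBJ_MIGHTBEDIRTY can be cleared below once the full
		 * scan confirms the object is clean.)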
		 */
		if (tscan >= tend && (tstart || tend < object->size)) {
			vm_page_unlock_queues();
			vm_object_clear_flag(object, OBJ_CLEANING);
			return;
		}
		pagerflags &= ~VM_PAGER_IGNORE_CLEANCHK;
	}

	/*
	 * Generally set CLEANCHK interlock and make the page read-only so
	 * we can then clear the object flags.
	 *
	 * However, if this is a nosync mmap then the object is likely to
	 * stay dirty so do not mess with the page and do not clear the
	 * object flags.
	 */
	clearobjflags = 1;
	TAILQ_FOREACH(p, &object->memq, listq) {
		vm_page_flag_set(p, PG_CLEANCHK);
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
			clearobjflags = 0;
		else
			pmap_page_protect(p, VM_PROT_READ);
	}

	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
		struct vnode *vp;

		vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
		if (object->type == OBJT_VNODE &&
		    (vp = (struct vnode *)object->handle) != NULL) {
			VI_LOCK(vp);
			if (vp->v_iflag & VI_OBJDIRTY)
				vp->v_iflag &= ~VI_OBJDIRTY;
			VI_UNLOCK(vp);
		}
	}

rescan:
	curgeneration = object->generation;

	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		int n;

		np = TAILQ_NEXT(p, listq);

again:
		pi = p->pindex;
		if (((p->flags & PG_CLEANCHK) == 0) ||
		    (pi < tstart) || (pi >= tend) ||
		    (p->valid == 0) ||
		    ((p->queue - p->pc) == PQ_CACHE)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		/*
		 * If we have been asked to skip nosync pages and this is a
		 * nosync page, skip it.  Note that the object flags were
		 * not cleared in this case so we do not have to set them.
		 */
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		n = vm_object_page_collect_flush(object, p,
		    curgeneration, pagerflags);
		if (n == 0)
			goto rescan;

		if (object->generation != curgeneration)
			goto rescan;

		/*
		 * Try to optimize the next page.  If we can't we pick up
		 * our (random) scan where we left off.
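		 *
		 * Looking up the next expected pindex directly lets a
		 * sequential flush proceed without rescanning memq from
		 * the start after each cluster is written.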
		 */
		if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) {
			if ((p = vm_page_lookup(object, pi + n)) != NULL)
				goto again;
		}
	}
	vm_page_unlock_queues();
#if 0
	VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc);
#endif

	vm_object_clear_flag(object, OBJ_CLEANING);
	return;
}

static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags)
{
	int runlen;
	int s;
	int maxf;
	int chkb;
	int maxb;
	int i;
	vm_pindex_t pi;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];

	s = splvm();
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	pi = p->pindex;
	while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
		vm_page_lock_queues();
		if (object->generation != curgeneration) {
			splx(s);
			return (0);
		}
	}
	maxf = 0;
	for (i = 1; i < vm_pageout_page_count; i++) {
		vm_page_t tp;

		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
			if ((tp->flags & PG_BUSY) ||
			    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
			     (tp->flags & PG_CLEANCHK) == 0) ||
			    (tp->busy != 0))
				break;
			if ((tp->queue - tp->pc) == PQ_CACHE) {
				vm_page_flag_clear(tp, PG_CLEANCHK);
				break;
			}
			vm_page_test_dirty(tp);
			if ((tp->dirty & tp->valid) == 0) {
				vm_page_flag_clear(tp, PG_CLEANCHK);
				break;
			}
			maf[i - 1] = tp;
			maxf++;
			continue;
		}
		break;
	}

	maxb = 0;
	chkb = vm_pageout_page_count - maxf;
	if (chkb) {
		for (i = 1; i < chkb; i++) {
			vm_page_t tp;

			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
				if ((tp->flags & PG_BUSY) ||
				    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
				     (tp->flags & PG_CLEANCHK) == 0) ||
				    (tp->busy != 0))
					break;
				if ((tp->queue - tp->pc) == PQ_CACHE) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				mab[i - 1] = tp;
				maxb++;
				continue;
			}
			break;
		}
	}

	for (i = 0; i < maxb; i++) {
		int index = (maxb - i) - 1;
		ma[index] = mab[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	vm_page_flag_clear(p, PG_CLEANCHK);
	ma[maxb] = p;
	for (i = 0; i < maxf; i++) {
		int index = (maxb + i) + 1;
		ma[index] = maf[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	runlen = maxb + maxf + 1;

	splx(s);
	vm_pageout_flush(ma, runlen, pagerflags);
	for (i = 0; i < runlen; i++) {
		if (ma[i]->valid & ma[i]->dirty) {
			pmap_page_protect(ma[i], VM_PROT_READ);
			vm_page_flag_set(ma[i], PG_CLEANCHK);

			/*
			 * maxf will end up being the actual number of pages
			 * we wrote out contiguously, non-inclusive of the
			 * first page.  We do not count look-behind pages.
			 */
			if (i >= maxb + 1 && (maxf > i - maxb - 1))
				maxf = i - maxb - 1;
		}
	}
	return (maxf + 1);
}

/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
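 *
 * For example, an msync(2) on a private mapping of a file can arrive
 * here with an anonymous shadow object; the loop below follows
 * backing_object links until it reaches the vnode object that can
 * actually be written.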
 */
void
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	int flags;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_LOCK(backing_object);
		VM_OBJECT_UNLOCK(object);
		object = backing_object;
		offset += object->backing_object_offset;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
		vp = object->handle;
		VM_OBJECT_UNLOCK(object);
		mtx_lock(&Giant);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
		flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
		flags |= invalidate ? OBJPC_INVAL : 0;
		VM_OBJECT_LOCK(object);
		vm_object_page_clean(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    flags);
		VM_OBJECT_UNLOCK(object);
		VOP_UNLOCK(vp, 0, curthread);
		mtx_unlock(&Giant);
		VM_OBJECT_LOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	     object->type == OBJT_DEVICE) && invalidate) {
		vm_object_page_remove(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    FALSE);
	}
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t backing_object, tobject;
	vm_page_t m;

	if (object == NULL)
		return;
	end = pindex + count;
	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
		VM_OBJECT_LOCK(tobject);
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
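		 * (OBJ_ONEMAPPING guarantees that no other mapping can
		 * see the page, so discarding its contents is safe.)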
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				goto unlock_tobject;
			}
		}
		m = vm_page_lookup(tobject, tpindex);
		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
			/*
			 * next object
			 */
			backing_object = tobject->backing_object;
			if (backing_object == NULL)
				goto unlock_tobject;
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_UNLOCK(tobject);
			tobject = backing_object;
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			goto shadowlookup;
		}
		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  If the page is not managed there are no
		 * page queues to mess with.  Things can break if we mess
		 * with pages in any of the below states.
		 */
		vm_page_lock_queues();
		if (m->hold_count ||
		    m->wire_count ||
		    (m->flags & PG_UNMANAGED) ||
		    m->valid != VM_PAGE_BITS_ALL) {
			vm_page_unlock_queues();
			goto unlock_tobject;
		}
		if (vm_page_sleep_if_busy(m, TRUE, "madvpo")) {
			VM_OBJECT_UNLOCK(tobject);
			goto relookup;
		}
		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
		}
		vm_page_unlock_queues();
		if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
			swap_pager_freespace(tobject, tpindex, 1);
unlock_tobject:
		VM_OBJECT_UNLOCK(tobject);
	}
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void
vm_object_shadow(
	vm_object_t *object,	/* IN/OUT */
	vm_ooffset_t *offset,	/* IN/OUT */
	vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		if (source->ref_count == 1 &&
		    source->handle == NULL &&
		    (source->type == OBJT_DEFAULT ||
		     source->type == OBJT_SWAP)) {
			VM_OBJECT_UNLOCK(source);
			return;
		}
		VM_OBJECT_UNLOCK(source);
	}

	/*
	 * Allocate a new object with the given length.
	 */
	result = vm_object_allocate(OBJT_DEFAULT, length);

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
		if (length < source->size)
			length = source->size;
		if (length > PQ_L2_SIZE / 3 + PQ_PRIME1 ||
		    source->generation > 1)
			length = PQ_L2_SIZE / 3 + PQ_PRIME1;
		result->pg_color = (source->pg_color +
		    length * source->generation) & PQ_L2_MASK;
		VM_OBJECT_UNLOCK(source);
		next_index = (result->pg_color + PQ_L2_SIZE / 3 + PQ_PRIME1) &
		    PQ_L2_MASK;
	}

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;

	/*
	 * Return the new things
	 */
	*offset = 0;
	*object = result;
}

/*
 * vm_object_split:
 *
 * Split the pages in a map entry into a new object.  This affords
 * easier removal of unused pages, and keeps object inheritance from
 * being a negative impact on memory usage.
 */
void
vm_object_split(vm_map_entry_t entry)
{
	vm_page_t m;
	vm_object_t orig_object, new_object, source;
	vm_pindex_t offidxstart, offidxend;
	vm_size_t idx, size;

	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
		return;
	if (orig_object->ref_count <= 1)
		return;
	VM_OBJECT_UNLOCK(orig_object);

	offidxstart = OFF_TO_IDX(entry->offset);
	offidxend = offidxstart + OFF_TO_IDX(entry->end - entry->start);
	size = offidxend - offidxstart;

	/*
	 * If swap_pager_copy() is later called, it will convert new_object
	 * into a swap object.
	 */
	new_object = vm_object_allocate(OBJT_DEFAULT, size);

	VM_OBJECT_LOCK(new_object);
	VM_OBJECT_LOCK(orig_object);
	source = orig_object->backing_object;
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		LIST_INSERT_HEAD(&source->shadow_head,
		    new_object, shadow_list);
		source->shadow_count++;
		source->generation++;
		vm_object_reference_locked(source);	/* for new_object */
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		VM_OBJECT_UNLOCK(source);
		new_object->backing_object_offset = 
		    orig_object->backing_object_offset + entry->offset;
		new_object->backing_object = source;
	}
	for (idx = 0; idx < size; idx++) {
	retry:
		m = vm_page_lookup(orig_object, offidxstart + idx);
		if (m == NULL)
			continue;

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
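		 *
		 * (The sleep-and-retry protocol below mirrors the one in
		 * vm_object_backing_scan(): set PG_WANTED, drop both
		 * object locks, msleep() on the page, then relock and
		 * redo the lookup.)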
		 */
		vm_page_lock_queues();
		if ((m->flags & PG_BUSY) || m->busy) {
			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
			VM_OBJECT_UNLOCK(orig_object);
			VM_OBJECT_UNLOCK(new_object);
			msleep(m, &vm_page_queue_mtx, PDROP | PVM, "spltwt", 0);
			VM_OBJECT_LOCK(new_object);
			VM_OBJECT_LOCK(orig_object);
			goto retry;
		}
		vm_page_busy(m);
		vm_page_rename(m, new_object, idx);
		/* page automatically made dirty by rename and cache handled */
		vm_page_busy(m);
		vm_page_unlock_queues();
	}
	if (orig_object->type == OBJT_SWAP) {
		/*
		 * swap_pager_copy() can sleep, in which case the orig_object's
		 * and new_object's locks are released and reacquired.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);
	}
	VM_OBJECT_UNLOCK(orig_object);
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &new_object->memq, listq)
		vm_page_wakeup(m);
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(new_object);
	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
	VM_OBJECT_LOCK(new_object);
}

#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static int
vm_object_backing_scan(vm_object_t object, int op)
{
	int s;
	int r = 1;
	vm_page_t p;
	vm_object_t backing_object;
	vm_pindex_t backing_offset_index;

	s = splvm();
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of
		 * swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed! XXX
		 */
		if (backing_object->type != OBJT_DEFAULT) {
			splx(s);
			return (0);
		}
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		vm_object_set_flag(backing_object, OBJ_DEAD);
	}

	/*
	 * Our scan
	 */
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next = TAILQ_NEXT(p, listq);
		vm_pindex_t new_pindex = p->pindex - backing_offset_index;

		if (op & OBSC_TEST_ALL_SHADOWED) {
			vm_page_t pp;

			/*
			 * Ignore pages outside the parent object's range
			 * and outside the parent object's mapping of the
			 * backing object.
			 *
			 * note that we do not busy the backing object's
			 * page.
			 */
			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				p = next;
				continue;
			}

			/*
			 * See if the parent has the page or if the parent's
			 * object pager has the page.  If the parent has the
			 * page but the page is not valid, the parent's
			 * object pager must have the page.
			 *
			 * If this fails, the parent does not completely shadow
			 * the object and we might as well give up now.
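			 *
			 * (A page is "shadowed" if the parent either has
			 * a valid resident copy at new_pindex or has
			 * paged-out contents for it, per
			 * vm_pager_has_page().)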
			 */

			pp = vm_page_lookup(object, new_pindex);
			if (
			    (pp == NULL || pp->valid == 0) &&
			    !vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				r = 0;
				break;
			}
		}

		/*
		 * Check for busy page
		 */
		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
			vm_page_t pp;

			vm_page_lock_queues();
			if (op & OBSC_COLLAPSE_NOWAIT) {
				if ((p->flags & PG_BUSY) ||
				    !p->valid ||
				    p->hold_count ||
				    p->wire_count ||
				    p->busy) {
					vm_page_unlock_queues();
					p = next;
					continue;
				}
			} else if (op & OBSC_COLLAPSE_WAIT) {
				if ((p->flags & PG_BUSY) || p->busy) {
					vm_page_flag_set(p,
					    PG_WANTED | PG_REFERENCED);
					VM_OBJECT_UNLOCK(backing_object);
					VM_OBJECT_UNLOCK(object);
					msleep(p, &vm_page_queue_mtx,
					    PDROP | PVM, "vmocol", 0);
					VM_OBJECT_LOCK(object);
					VM_OBJECT_LOCK(backing_object);
					/*
					 * If we slept, anything could have
					 * happened.  Since the object is
					 * marked dead, the backing offset
					 * should not have changed so we
					 * just restart our scan.
					 */
					p = TAILQ_FIRST(&backing_object->memq);
					continue;
				}
			}

			/*
			 * Busy the page
			 */
			vm_page_busy(p);
			vm_page_unlock_queues();

			KASSERT(
			    p->object == backing_object,
			    ("vm_object_qcollapse(): object mismatch")
			);

			/*
			 * Destroy any associated swap
			 */
			if (backing_object->type == OBJT_SWAP) {
				swap_pager_freespace(
				    backing_object,
				    p->pindex,
				    1
				);
			}

			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				/*
				 * Page is out of the parent object's range, we
				 * can simply destroy it.
				 */
				vm_page_lock_queues();
				pmap_remove_all(p);
				vm_page_free(p);
				vm_page_unlock_queues();
				p = next;
				continue;
			}

			pp = vm_page_lookup(object, new_pindex);
			if (
			    pp != NULL ||
			    vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				/*
				 * page already exists in parent OR swap exists
				 * for this location in the parent.  Destroy
				 * the original page from the backing object.
				 *
				 * Leave the parent's page alone
				 */
				vm_page_lock_queues();
				pmap_remove_all(p);
				vm_page_free(p);
				vm_page_unlock_queues();
				p = next;
				continue;
			}

			/*
			 * Page does not exist in parent, rename the
			 * page from the backing object to the main object.
			 *
			 * If the page was mapped to a process, it can remain
			 * mapped through the rename.
			 */
			vm_page_lock_queues();
			vm_page_rename(p, object, new_pindex);
			vm_page_unlock_queues();
			/* page automatically made dirty by rename */
		}
		p = next;
	}
	splx(s);
	return (r);
}


/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
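 *
 * The ref_count += 2 below pins the backing object so that the NOWAIT
 * scan cannot drop its last reference while pages are being freed or
 * renamed.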
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);

	if (backing_object->ref_count != 1)
		return;

	backing_object->ref_count += 2;

	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);

	backing_object->ref_count -= 2;
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(vm_object_t object)
{
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	while (TRUE) {
		vm_object_t backing_object;

		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsible.
		 */
		VM_OBJECT_LOCK(backing_object);
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			VM_OBJECT_UNLOCK(backing_object);
			break;
		}

		if (
		    object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0
		) {
			vm_object_qcollapse(object);
			VM_OBJECT_UNLOCK(backing_object);
			break;
		}
		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				/*
				 * swap_pager_copy() can sleep, in which case
				 * the backing_object's and object's locks are
				 * released and reacquired.
				 */
				swap_pager_copy(
				    backing_object,
				    object,
				    OFF_TO_IDX(object->backing_object_offset), TRUE);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;
			if (backing_object->backing_object) {
				VM_OBJECT_LOCK(backing_object->backing_object);
				LIST_REMOVE(backing_object, shadow_list);
				LIST_INSERT_HEAD(
				    &backing_object->backing_object->shadow_head,
				    object, shadow_list);
				/*
				 * The shadow_count has not changed.
				 */
				backing_object->backing_object->generation++;
				VM_OBJECT_UNLOCK(backing_object->backing_object);
			}
			object->backing_object = backing_object->backing_object;
			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object));
			VM_OBJECT_UNLOCK(backing_object);

			mtx_lock(&vm_object_list_mtx);
			TAILQ_REMOVE(
			    &vm_object_list,
			    backing_object,
			    object_list
			);
			mtx_unlock(&vm_object_list_mtx);

			uma_zfree(obj_zone, backing_object);

			object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
				VM_OBJECT_UNLOCK(backing_object);
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				VM_OBJECT_LOCK(new_backing_object);
				LIST_INSERT_HEAD(
				    &new_backing_object->shadow_head,
				    object,
				    shadow_list
				);
				new_backing_object->shadow_count++;
				new_backing_object->generation++;
				vm_object_reference_locked(new_backing_object);
				VM_OBJECT_UNLOCK(new_backing_object);
				object->backing_object_offset +=
				    backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object.  Since
			 * its ref_count was at least 2, it will not vanish.
			 */
			backing_object->ref_count--;
			VM_OBJECT_UNLOCK(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove:
 *
 *	Removes all physical pages in the given range from the
 *	object's list of pages.  If the range's end is zero, all
 *	physical pages from the range's start to the end of the object
 *	are deleted.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    boolean_t clean_only)
{
	vm_page_t p, next;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->resident_page_count == 0)
		return;

	/*
	 * Since physically-backed objects do not use managed pages, we can't
	 * remove pages from the object (we must instead remove the page
	 * references, and then destroy the object).
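	 *
	 * (OBJT_PHYS objects hold unmanaged, permanently resident
	 * pages, so the page-queue manipulation below does not apply
	 * to them.)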
	 */
	KASSERT(object->type != OBJT_PHYS,
	    ("attempt to remove pages from a physical object"));

	vm_object_pip_add(object, 1);
again:
	vm_page_lock_queues();
	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		if (p->pindex < start) {
			p = vm_page_splay(start, object->root);
			if ((object->root = p)->pindex < start)
				p = TAILQ_NEXT(p, listq);
		}
	}
	/*
	 * Assert: the variable p is either (1) the page with the
	 * least pindex greater than or equal to the parameter pindex
	 * or (2) NULL.
	 */
	for (;
	     p != NULL && (p->pindex < end || end == 0);
	     p = next) {
		next = TAILQ_NEXT(p, listq);

		if (p->wire_count != 0) {
			pmap_remove_all(p);
			if (!clean_only)
				p->valid = 0;
			continue;
		}
		if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
			goto again;
		if (clean_only && p->valid) {
			vm_page_test_dirty(p);
			if (p->valid & p->dirty)
				continue;
		}
		vm_page_busy(p);
		pmap_remove_all(p);
		vm_page_free(p);
	}
	vm_page_unlock_queues();
	vm_object_pip_wakeup(object);
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
    vm_size_t prev_size, vm_size_t next_size)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL)
		return (TRUE);
	VM_OBJECT_LOCK(prev_object);
	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 * . more than one reference
	 * . paged out
	 * . shadows another object
	 * . has a copy elsewhere
	 * (any of which mean that the pages not mapped to
	 * prev_entry may be in use anyway)
	 */
	if (prev_object->backing_object != NULL) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = prev_pindex + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
		    next_pindex,
		    next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
			    next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
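	 * For example (illustrative numbers only), coalescing a 4-page
	 * reference ending at pindex 4 with a following 2-page
	 * reference grows prev_object->size from 4 to 6.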
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	VM_OBJECT_UNLOCK(prev_object);
	return (TRUE);
}

void
vm_object_set_writeable_dirty(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	if (object->type == OBJT_VNODE &&
	    (vp = (struct vnode *)object->handle) != NULL) {
		VI_LOCK(vp);
		if ((vp->v_iflag & VI_OBJDIRTY) == 0)
			vp->v_iflag |= VI_OBJDIRTY;
		VI_UNLOCK(vp);
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

static int
vm_object_in_map(vm_object_t object)
{
	struct proc *p;

	/* sx_slock(&allproc_lock); */
	LIST_FOREACH(p, &allproc, p_list) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
			/* sx_sunlock(&allproc_lock); */
			return 1;
		}
	}
	/* sx_sunlock(&allproc_lock); */
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	if (_vm_object_in_map(kmem_map, object, 0))
		return 1;
	if (_vm_object_in_map(pager_map, object, 0))
		return 1;
	if (_vm_object_in_map(buffer_map, object, 0))
		return 1;
	return 0;
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
				    (long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
				    "vmochk: internal obj is not in a map: "
				    "ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}

/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x\n",
	    object, (int)object->type, (uintmax_t)object->size,
	    object->resident_page_count, object->ref_count, object->flags);
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (uintmax_t)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%jx,page=0x%jx)",
		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(
	/* db_expr_t */ long addr,
	boolean_t have_addr,
	/* db_expr_t */ long count,
	char *modif)
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;
	int c;

	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_paddr_t pa = -1, padiff;
		int rcount;
		vm_page_t m;

		db_printf("new object: %p\n", (void *)object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
					    (long)fidx, rcount, (long)pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}

			if (rcount &&
			    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%ld)run(%d)pa(0x%lx)",
				    (long)fidx, rcount, (long)pa);
				db_printf("pd(%ld)\n", (long)padiff);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
			    (long)fidx, rcount, (long)pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */