vm_page.c revision 236920
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	GENERAL RULES ON VM_PAGE MANIPULATION
 *
 *	- a pageq mutex is required when adding or removing a page from a
 *	  page queue (vm_page_queue[]), regardless of other mutexes or the
 *	  busy state of a page.
 *
 *	- The object mutex is held when inserting or removing
 *	  pages from an object (vm_page_insert() or vm_page_remove()).
 *
 */

/*
 *	Resident memory management module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/9/sys/vm/vm_page.c 236920 2012-06-11 20:58:23Z kib $");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct vpgqueues vm_page_queues[PQ_COUNT];
struct vpglocks vm_page_queue_lock;
struct vpglocks vm_page_queue_free_lock;

struct vpglocks	pa_lock[PA_LOCK_COUNT];

vm_page_t vm_page_array;
long vm_page_array_size;
long first_page;
int vm_page_zero_count;

static int boot_pages = UMA_BOOT_PAGES;
TUNABLE_INT("vm.boot_pages", &boot_pages);
SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
	"number of pages allocated for bootstrapping the VM system");

int pa_tryrelock_restart;
SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
    &pa_tryrelock_restart, 0, "Number of tryrelock restarts");

static uma_zone_t fakepg_zone;

static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_queue_remove(int queue, vm_page_t m);
static void vm_page_enqueue(int queue, vm_page_t m);
static void vm_page_init_fakepg(void *dummy);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);

static void
vm_page_init_fakepg(void *dummy)
{

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
}

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

/*
 * Try to acquire a physical address lock while a pmap is locked.  If we
 * fail to trylock we unlock and lock the pmap directly and cache the
 * locked pa in *locked.  The caller should then restart their loop in case
 * the virtual to physical mapping has changed.
 */
int
vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
{
	vm_paddr_t lockpa;

	lockpa = *locked;
	*locked = pa;
	if (lockpa) {
		PA_LOCK_ASSERT(lockpa, MA_OWNED);
		if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
			return (0);
		PA_UNLOCK(lockpa);
	}
	if (PA_TRYLOCK(pa))
		return (0);
	PMAP_UNLOCK(pmap);
	atomic_add_int(&pa_tryrelock_restart, 1);
	PA_LOCK(pa);
	PMAP_LOCK(pmap);
	return (EAGAIN);
}
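
/*
 * Editor's note (illustrative sketch, not part of revision 236920): a
 * caller of vm_page_pa_tryrelock() wraps it in a retry loop, because an
 * EAGAIN return means both locks were dropped and the virtual-to-physical
 * translation must be re-derived.  Roughly, in the style of a pmap's
 * page-hold path (names such as "pte" and PG_FRAME are hypothetical,
 * machine-dependent details):
 *
 *	pa = 0;
 *	PMAP_LOCK(pmap);
 * retry:
 *	pte = pmap_pte(pmap, va);
 *	if (pte != NULL && (*pte & PG_V) != 0) {
 *		if (vm_page_pa_tryrelock(pmap, *pte & PG_FRAME, &pa))
 *			goto retry;
 *		m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
 *		vm_page_hold(m);
 *	}
 *	PA_UNLOCK_COND(pa);
 *	PMAP_UNLOCK(pmap);
 */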

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (cnt.v_page_size == 0)
		cnt.v_page_size = PAGE_SIZE;
	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 *	vm_page_blacklist_lookup:
 *
 *	See if a physical address in this page has been listed
 *	in the blacklist tunable.  Entries in the tunable are
 *	separated by spaces or commas.  If an invalid integer is
 *	encountered then the rest of the string is skipped.
 */
static int
vm_page_blacklist_lookup(char *list, vm_paddr_t pa)
{
	vm_paddr_t bad;
	char *cp, *pos;

	for (pos = list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp != '\0') {
			if (*cp == ' ' || *cp == ',') {
				cp++;
				if (cp == pos)
					continue;
			} else
				break;
		}
		if (pa == trunc_page(bad))
			return (1);
	}
	return (0);
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	vm_offset_t mapped;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	vm_paddr_t last_pa;
	char *list;

	/* the biggest memory array is the second group of pages */
	vm_paddr_t end;
	vm_paddr_t biggestsize;
	vm_paddr_t low_water, high_water;
	int biggestone;

	biggestsize = 0;
	biggestone = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	low_water = phys_avail[0];
	high_water = phys_avail[1];

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		if (phys_avail[i] < low_water)
			low_water = phys_avail[i];
		if (phys_avail[i + 1] > high_water)
			high_water = phys_avail[i + 1];
	}

#ifdef XEN
	low_water = 0;
#endif

	end = phys_avail[biggestone+1];

	/*
	 * Initialize the locks.
	 */
	mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
	    MTX_DEF);

	/* Setup page locks. */
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i].data, "page lock", NULL, MTX_DEF);

	/*
	 * Initialize the queue headers for the hold queue, the active queue,
	 * and the inactive queue.
	 */
	for (i = 0; i < PQ_COUNT; i++)
		TAILQ_INIT(&vm_page_queues[i].pl);
	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;

	/*
	 * Allocate memory for use when bootstrapping the kernel memory
	 * allocator.
	 */
	new_end = end - (boot_pages * UMA_SLAB_SIZE);
	new_end = trunc_page(new_end);
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, end - new_end);
	uma_startup((void *)mapped, boot_pages);

#if defined(__amd64__) || defined(__i386__) || defined(__arm__) || \
    defined(__mips__)
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	last_pa = 0;
	for (i = 0; dump_avail[i + 1] != 0; i += 2)
		if (dump_avail[i + 1] > last_pa)
			last_pa = dump_avail[i + 1];
	page_range = last_pa / PAGE_SIZE;
	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#endif
#ifdef __amd64__
	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = low_water / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	page_range = 0;
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
#elif defined(VM_PHYSSEG_DENSE)
	page_range = high_water / PAGE_SIZE - first_page;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
	end = new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 */
	vaddr += PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array = (vm_page_t) mapped;
#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate memory for the reservation management system's data
	 * structures.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
#endif
#if defined(__amd64__) || defined(__mips__)
	/*
	 * pmap_map on amd64 and mips can come out of the direct-map, not kvm
	 * like i386, so the pages must be tracked for a crashdump to include
	 * this data.  This includes the vm_page_array and the early UMA
	 * bootstrap pages.
	 */
	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	for (i = 0; i < page_range; i++)
		vm_page_array[i].order = VM_NFREEORDER;
	vm_page_array_size = page_range;

	/*
	 * Initialize the physical memory allocator.
	 */
	vm_phys_init();

	/*
	 * Add every available physical page that is not blacklisted to
	 * the free lists.
	 */
	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	list = getenv("vm.blacklist");
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		pa = phys_avail[i];
		last_pa = phys_avail[i + 1];
		while (pa < last_pa) {
			if (list != NULL &&
			    vm_page_blacklist_lookup(list, pa))
				printf("Skipping page with pa 0x%jx\n",
				    (uintmax_t)pa);
			else
				vm_phys_add_page(pa);
			pa += PAGE_SIZE;
		}
	}
	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif
	return (vaddr);
}


CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0);

void
vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_WRITEABLE flag can only be set if the page is managed and
	 * VPO_BUSY.  Currently, this flag is only set by pmap_enter().
	 */
	KASSERT((bits & PGA_WRITEABLE) == 0 ||
	    (m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == VPO_BUSY,
	    ("PGA_WRITEABLE and !VPO_BUSY"));

	/*
	 * We want to use atomic updates for m->aflags, which is a
	 * byte wide.  Not all architectures provide atomic operations
	 * on the single-byte destination.  Punt and access the whole
	 * 4-byte word with an atomic update.  Parallel non-atomic
	 * updates to the fields included in the update by proximity
	 * are handled properly by atomics.
	 */
	addr = (void *)&m->aflags;
	MPASS(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0);
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_set_32(addr, val);
}

void
vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_REFERENCED flag can only be cleared if the object
	 * containing the page is locked.
	 */
	KASSERT((bits & PGA_REFERENCED) == 0 || VM_OBJECT_LOCKED(m->object),
	    ("PGA_REFERENCED and !VM_OBJECT_LOCKED"));

	/*
	 * See the comment in vm_page_aflag_set().
	 */
	addr = (void *)&m->aflags;
	MPASS(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0);
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_clear_32(addr, val);
}

void
vm_page_reference(vm_page_t m)
{

	vm_page_aflag_set(m, PGA_REFERENCED);
}

void
vm_page_busy(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("vm_page_busy: page already busy!!!"));
	m->oflags |= VPO_BUSY;
}

/*
 *	vm_page_flash:
 *
 *	wakeup anyone waiting for the page.
 */
void
vm_page_flash(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (m->oflags & VPO_WANTED) {
		m->oflags &= ~VPO_WANTED;
		wakeup(m);
	}
}
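
/*
 * Editor's note (illustrative sketch, not part of revision 236920): the
 * VPO_BUSY/VPO_WANTED handshake above is consumed as a lookup-sleep-retry
 * loop.  Because vm_page_sleep() sleeps on the object mutex, that mutex is
 * held again when msleep() returns, so a caller holding the object lock
 * can simply retry the lookup (the wait-channel name "pgbusy" is made up):
 *
 *	while ((m = vm_page_lookup(object, pindex)) != NULL &&
 *	    (m->oflags & VPO_BUSY) != 0)
 *		vm_page_sleep(m, "pgbusy");
 */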
/*
 *	vm_page_wakeup:
 *
 *	clear the VPO_BUSY flag and wakeup anyone waiting for the
 *	page.
 *
 */
void
vm_page_wakeup(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
	m->oflags &= ~VPO_BUSY;
	vm_page_flash(m);
}

void
vm_page_io_start(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	m->busy++;
}

void
vm_page_io_finish(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT(m->busy > 0, ("vm_page_io_finish: page %p is not busy", m));
	m->busy--;
	if (m->busy == 0)
		vm_page_flash(m);
}

/*
 * Keep page from being freed by the page daemon
 * much of the same effect as wiring, except much lower
 * overhead and should be used only for *very* temporary
 * holding ("wiring").
 */
void
vm_page_hold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	mem->hold_count++;
}

void
vm_page_unhold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
		vm_page_free_toq(mem);
}

/*
 *	vm_page_unhold_pages:
 *
 *	Unhold each of the pages that is referenced by the given array.
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{
	struct mtx *mtx, *new_mtx;

	mtx = NULL;
	for (; count != 0; count--) {
		/*
		 * Avoid releasing and reacquiring the same page lock.
		 */
		new_mtx = vm_page_lockptr(*ma);
		if (mtx != new_mtx) {
			if (mtx != NULL)
				mtx_unlock(mtx);
			mtx = new_mtx;
			mtx_lock(mtx);
		}
		vm_page_unhold(*ma);
		ma++;
	}
	if (mtx != NULL)
		mtx_unlock(mtx);
}

/*
 *	vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	m->phys_addr = paddr;
	m->queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_BUSY | VPO_UNMANAGED;
	m->wire_count = 1;
	pmap_page_set_memattr(m, memattr);
	return (m);
}

/*
 *	vm_page_putfake:
 *
 *	Release a fictitious page.
 */
void
vm_page_putfake(vm_page_t m)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	uma_zfree(fakepg_zone, m);
}

/*
 *	vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}
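
/*
 * Editor's note (illustrative sketch, not part of revision 236920): a
 * device pager or driver would typically use the fictitious-page calls
 * above in a create/use/destroy pattern; "paddr" and "new_paddr" here are
 * hypothetical physical addresses supplied by the caller:
 *
 *	m = vm_page_getfake(paddr, VM_MEMATTR_UNCACHEABLE);
 *	... hand "m" to the fault handler for insertion into a map ...
 *	vm_page_updatefake(m, new_paddr, VM_MEMATTR_UNCACHEABLE);
 *	... once no mapping can reference the page any longer ...
 *	vm_page_putfake(m);
 */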
/*
 *	vm_page_free:
 *
 *	Free a page.
 */
void
vm_page_free(vm_page_t m)
{

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue
 */
void
vm_page_free_zero(vm_page_t m)
{

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_sleep:
 *
 *	Sleep and release the page and page queues locks.
 *
 *	The object containing the given page must be locked.
 */
void
vm_page_sleep(vm_page_t m, const char *msg)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (mtx_owned(&vm_page_queue_mtx))
		vm_page_unlock_queues();
	if (mtx_owned(vm_page_lockptr(m)))
		vm_page_unlock(m);

	/*
	 * It's possible that while we sleep, the page will get
	 * unbusied and freed.  If we are holding the object
	 * lock, we will assume we hold a reference to the object
	 * such that even if m->object changes, we can re-lock
	 * it.
	 */
	m->oflags |= VPO_WANTED;
	msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0);
}

/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
void
vm_page_dirty(vm_page_t m)
{

	KASSERT((m->flags & PG_CACHED) == 0,
	    ("vm_page_dirty: page in cache!"));
	KASSERT(!VM_PAGE_IS_FREE(m),
	    ("vm_page_dirty: page is free!"));
	KASSERT(m->valid == VM_PAGE_BITS_ALL,
	    ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_splay:
 *
 *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
 *	the vm_page containing the given pindex.  If, however, that
 *	pindex is not found in the vm_object, returns a vm_page that is
 *	adjacent to the pindex, coming before or after it.
 */
vm_page_t
vm_page_splay(vm_pindex_t pindex, vm_page_t root)
{
	struct vm_page dummy;
	vm_page_t lefttreemax, righttreemin, y;

	if (root == NULL)
		return (root);
	lefttreemax = righttreemin = &dummy;
	for (;; root = y) {
		if (pindex < root->pindex) {
			if ((y = root->left) == NULL)
				break;
			if (pindex < y->pindex) {
				/* Rotate right. */
				root->left = y->right;
				y->right = root;
				root = y;
				if ((y = root->left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->left = root;
			righttreemin = root;
		} else if (pindex > root->pindex) {
			if ((y = root->right) == NULL)
				break;
			if (pindex > y->pindex) {
				/* Rotate left. */
				root->right = y->left;
				y->left = root;
				root = y;
				if ((y = root->right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->right = root;
			lefttreemax = root;
		} else
			break;
	}
	/* Assemble the new root. */
	lefttreemax->right = root->left;
	righttreemin->left = root->right;
	root->left = dummy.right;
	root->right = dummy.left;
	return (root);
}
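
/*
 * Editor's note (not part of revision 236920): because splaying
 * restructures the tree, every lookup through vm_page_splay() must store
 * the returned node back as the new root.  The idiom used throughout this
 * file (see vm_page_lookup() below) is:
 *
 *	object->root = vm_page_splay(pindex, object->root);
 *	if (object->root->pindex != pindex)
 *		... "pindex" is not resident; object->root is a neighbor ...
 */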
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The pagetables are not updated but will presumably fault the page
 *	in if necessary, or if a kernel page the caller will at some point
 *	enter the page into the kernel's pmap.  We are not allowed to block
 *	here so we *can't* do this anyway.
 *
 *	The object and page must be locked.
 *	This routine may not block.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t root;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (m->object != NULL)
		panic("vm_page_insert: page already inserted");

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	root = object->root;
	if (root == NULL) {
		m->left = NULL;
		m->right = NULL;
		TAILQ_INSERT_TAIL(&object->memq, m, listq);
	} else {
		root = vm_page_splay(pindex, root);
		if (pindex < root->pindex) {
			m->left = root->left;
			m->right = root;
			root->left = NULL;
			TAILQ_INSERT_BEFORE(root, m, listq);
		} else if (pindex == root->pindex)
			panic("vm_page_insert: offset already allocated");
		else {
			m->right = root->right;
			m->left = root;
			root->right = NULL;
			TAILQ_INSERT_AFTER(&object->memq, root, m, listq);
		}
	}
	object->root = m;

	/*
	 * show that the object has one more resident page.
	 */
	object->resident_page_count++;
	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold((struct vnode *)object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_MIGHTBEDIRTY flag.
	 */
	if (m->aflags & PGA_WRITEABLE)
		vm_object_set_writeable_dirty(object);
}

/*
 *	vm_page_remove:
 *	NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list, but does not invalidate/terminate
 *	the backing store.
 *
 *	The object and page must be locked.
 *	The underlying pmap entry (if any) is NOT removed here.
 *	This routine may not block.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t next, prev, root;

	if ((m->oflags & VPO_UNMANAGED) == 0)
		vm_page_lock_assert(m, MA_OWNED);
	if ((object = m->object) == NULL)
		return;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (m->oflags & VPO_BUSY) {
		m->oflags &= ~VPO_BUSY;
		vm_page_flash(m);
	}

	/*
	 * Now remove from the object's list of backed pages.
	 */
	if ((next = TAILQ_NEXT(m, listq)) != NULL && next->left == m) {
		/*
		 * Since the page's successor in the list is also its parent
		 * in the tree, its right subtree must be empty.
		 */
		next->left = m->left;
		KASSERT(m->right == NULL,
		    ("vm_page_remove: page %p has right child", m));
	} else if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
	    prev->right == m) {
		/*
		 * Since the page's predecessor in the list is also its parent
		 * in the tree, its left subtree must be empty.
		 */
		KASSERT(m->left == NULL,
		    ("vm_page_remove: page %p has left child", m));
		prev->right = m->right;
	} else {
		if (m != object->root)
			vm_page_splay(m->pindex, object->root);
		if (m->left == NULL)
			root = m->right;
		else if (m->right == NULL)
			root = m->left;
		else {
			/*
			 * Move the page's successor to the root, because
			 * pages are usually removed in ascending order.
			 */
			if (m->right != next)
				vm_page_splay(m->pindex, m->right);
			next->left = m->left;
			root = next;
		}
		object->root = root;
	}
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;
	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop((struct vnode *)object->handle);

	m->object = NULL;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 *	This routine may not block.
 *	This is a critical path routine
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if ((m = object->root) != NULL && m->pindex != pindex) {
		m = vm_page_splay(pindex, m);
		if ((object->root = m)->pindex != pindex)
			m = NULL;
	}
	return (m);
}

/*
 *	vm_page_find_least:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.
 *
 *	The object must be locked.
 *	The routine may not block.
 */
vm_page_t
vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if ((m = TAILQ_FIRST(&object->memq)) != NULL) {
		if (m->pindex < pindex) {
			m = vm_page_splay(pindex, object->root);
			if ((object->root = m)->pindex < pindex)
				m = TAILQ_NEXT(m, listq);
		}
	}
	return (m);
}

/*
 * Returns the given page's successor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((next = TAILQ_NEXT(m, listq)) != NULL &&
	    next->pindex != m->pindex + 1)
		next = NULL;
	return (next);
}

/*
 * Returns the given page's predecessor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_prev(vm_page_t m)
{
	vm_page_t prev;

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
	    prev->pindex != m->pindex - 1)
		prev = NULL;
	return (prev);
}
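
/*
 * Editor's note (illustrative sketch, not part of revision 236920):
 * vm_page_next() only follows the resident list while the pindexes stay
 * consecutive, so a contiguous resident run starting at "m" can be walked
 * with:
 *
 *	for (; m != NULL; m = vm_page_next(m))
 *		... operate on each page of the run in pindex order ...
 */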
/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 *	This routine may not block.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons: (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.  If the page is on the cache, we have to deactivate it
 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	      on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{

	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	vm_page_dirty(m);
}

/*
 *	Convert all of the given object's cached pages that have a
 *	pindex within the given range into free pages.  If the value
 *	zero is given for "end", then the range's upper bound is
 *	infinity.  If the given object is backed by a vnode and it
 *	transitions from having one or more cached pages to none, the
 *	vnode's hold count is reduced.
 */
void
vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t m, m_next;
	boolean_t empty;

	mtx_lock(&vm_page_queue_free_mtx);
	if (__predict_false(object->cache == NULL)) {
		mtx_unlock(&vm_page_queue_free_mtx);
		return;
	}
	m = object->cache = vm_page_splay(start, object->cache);
	if (m->pindex < start) {
		if (m->right == NULL)
			m = NULL;
		else {
			m_next = vm_page_splay(start, m->right);
			m_next->left = m;
			m->right = NULL;
			m = object->cache = m_next;
		}
	}

	/*
	 * At this point, "m" is either (1) a reference to the page
	 * with the least pindex that is greater than or equal to
	 * "start" or (2) NULL.
	 */
	for (; m != NULL && (m->pindex < end || end == 0); m = m_next) {
		/*
		 * Find "m"'s successor and remove "m" from the
		 * object's cache.
		 */
		if (m->right == NULL) {
			object->cache = m->left;
			m_next = NULL;
		} else {
			m_next = vm_page_splay(start, m->right);
			m_next->left = m->left;
			object->cache = m_next;
		}
		/* Convert "m" to a free page. */
		m->object = NULL;
		m->valid = 0;
		/* Clear PG_CACHED and set PG_FREE. */
		m->flags ^= PG_CACHED | PG_FREE;
		KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
		    ("vm_page_cache_free: page %p has inconsistent flags", m));
		cnt.v_cache_count--;
		cnt.v_free_count++;
	}
	empty = object->cache == NULL;
	mtx_unlock(&vm_page_queue_free_mtx);
	if (object->type == OBJT_VNODE && empty)
		vdrop(object->handle);
}

/*
 *	Returns the cached page that is associated with the given
 *	object and offset.  If, however, none exists, returns NULL.
 *
 *	The free page queue must be locked.
 */
static inline vm_page_t
vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if ((m = object->cache) != NULL && m->pindex != pindex) {
		m = vm_page_splay(pindex, m);
		if ((object->cache = m)->pindex != pindex)
			m = NULL;
	}
	return (m);
}
/*
 *	Remove the given cached page from its containing object's
 *	collection of cached pages.
 *
 *	The free page queue must be locked.
 */
void
vm_page_cache_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t root;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT((m->flags & PG_CACHED) != 0,
	    ("vm_page_cache_remove: page %p is not cached", m));
	object = m->object;
	if (m != object->cache) {
		root = vm_page_splay(m->pindex, object->cache);
		KASSERT(root == m,
		    ("vm_page_cache_remove: page %p is not cached in object %p",
		    m, object));
	}
	if (m->left == NULL)
		root = m->right;
	else if (m->right == NULL)
		root = m->left;
	else {
		root = vm_page_splay(m->pindex, m->left);
		root->right = m->right;
	}
	object->cache = root;
	m->object = NULL;
	cnt.v_cache_count--;
}

/*
 *	Transfer all of the cached pages with offset greater than or
 *	equal to 'offidxstart' from the original object's cache to the
 *	new object's cache.  However, any cached pages with offset
 *	greater than or equal to the new object's size are kept in the
 *	original object.  Initially, the new object's cache must be
 *	empty.  Offset 'offidxstart' in the original object must
 *	correspond to offset zero in the new object.
 *
 *	The new object must be locked.
 */
void
vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
    vm_object_t new_object)
{
	vm_page_t m, m_next;

	/*
	 * Insertion into an object's collection of cached pages
	 * requires the object to be locked.  In contrast, removal does
	 * not.
	 */
	VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
	KASSERT(new_object->cache == NULL,
	    ("vm_page_cache_transfer: object %p has cached pages",
	    new_object));
	mtx_lock(&vm_page_queue_free_mtx);
	if ((m = orig_object->cache) != NULL) {
		/*
		 * Transfer all of the pages with offset greater than or
		 * equal to 'offidxstart' from the original object's
		 * cache to the new object's cache.
		 */
		m = vm_page_splay(offidxstart, m);
		if (m->pindex < offidxstart) {
			orig_object->cache = m;
			new_object->cache = m->right;
			m->right = NULL;
		} else {
			orig_object->cache = m->left;
			new_object->cache = m;
			m->left = NULL;
		}
		while ((m = new_object->cache) != NULL) {
			if ((m->pindex - offidxstart) >= new_object->size) {
				/*
				 * Return all of the cached pages with
				 * offset greater than or equal to the
				 * new object's size to the original
				 * object's cache.
				 */
				new_object->cache = m->left;
				m->left = orig_object->cache;
				orig_object->cache = m;
				break;
			}
			m_next = vm_page_splay(m->pindex, m->right);
			/* Update the page's object and offset. */
			m->object = new_object;
			m->pindex -= offidxstart;
			if (m_next == NULL)
				break;
			m->right = NULL;
			m_next->left = m;
			new_object->cache = m_next;
		}
		KASSERT(new_object->cache == NULL ||
		    new_object->type == OBJT_SWAP,
		    ("vm_page_cache_transfer: object %p's type is incompatible"
		    " with cached pages", new_object));
	}
	mtx_unlock(&vm_page_queue_free_mtx);
}
/*
 *	Returns TRUE if a cached page is associated with the given object and
 *	offset, and FALSE otherwise.
 *
 *	The object must be locked.
 */
boolean_t
vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	/*
	 * Insertion into an object's collection of cached pages requires the
	 * object to be locked.  Therefore, if the object is locked and the
	 * object's collection is empty, there is no need to acquire the free
	 * page queues lock in order to prove that the specified page doesn't
	 * exist.
	 */
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->cache == NULL)
		return (FALSE);
	mtx_lock(&vm_page_queue_free_mtx);
	m = vm_page_cache_lookup(object, pindex);
	mtx_unlock(&vm_page_queue_free_mtx);
	return (m != NULL);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_NOOBJ		page is not associated with a vm object
 *	VM_ALLOC_NOBUSY		do not set the page busy
 *	VM_ALLOC_IFCACHED	return page only if it is cached
 *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
 *				is cached
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
	struct vnode *vp = NULL;
	vm_object_t m_object;
	vm_page_t m;
	int flags, page_req;

	if ((req & VM_ALLOC_NOOBJ) == 0) {
		KASSERT(object != NULL,
		    ("vm_page_alloc: NULL object."));
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	}

	page_req = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The pager is allowed to eat deeper into the free page list.
	 */
	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT))
		page_req = VM_ALLOC_SYSTEM;

	mtx_lock(&vm_page_queue_free_mtx);
	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
	    (page_req == VM_ALLOC_SYSTEM &&
	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
	    (page_req == VM_ALLOC_INTERRUPT &&
	    cnt.v_free_count + cnt.v_cache_count > 0)) {
		/*
		 * Allocate from the free queue if the number of free pages
		 * exceeds the minimum for the request class.
		 */
		if (object != NULL &&
		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
				mtx_unlock(&vm_page_queue_free_mtx);
				return (NULL);
			}
			if (vm_phys_unfree_page(m))
				vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
#if VM_NRESERVLEVEL > 0
			else if (!vm_reserv_reactivate_page(m))
#else
			else
#endif
				panic("vm_page_alloc: cache page %p is missing"
				    " from the free queue", m);
		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
			mtx_unlock(&vm_page_queue_free_mtx);
			return (NULL);
#if VM_NRESERVLEVEL > 0
		} else if (object == NULL || object->type == OBJT_DEVICE ||
		    object->type == OBJT_SG ||
		    (object->flags & OBJ_COLORED) == 0 ||
		    (m = vm_reserv_alloc_page(object, pindex)) == NULL) {
#else
		} else {
#endif
			m = vm_phys_alloc_pages(object != NULL ?
			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
#if VM_NRESERVLEVEL > 0
			if (m == NULL && vm_reserv_reclaim_inactive()) {
				m = vm_phys_alloc_pages(object != NULL ?
				    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
				    0);
			}
#endif
		}
	} else {
		/*
		 * Not allocatable, give up.
		 */
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit,
		    MAX((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 *  At this point we had better have found a good page.
	 */

	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
	KASSERT(m->queue == PQ_NONE,
	    ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
	KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
	KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
	KASSERT(m->busy == 0, ("vm_page_alloc: page %p is busy", m));
	KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
	    pmap_page_get_memattr(m)));
	if ((m->flags & PG_CACHED) != 0) {
		KASSERT(m->valid != 0,
		    ("vm_page_alloc: cached page %p is invalid", m));
		if (m->object == object && m->pindex == pindex)
			cnt.v_reactivated++;
		else
			m->valid = 0;
		m_object = m->object;
		vm_page_cache_remove(m);
		if (m_object->type == OBJT_VNODE && m_object->cache == NULL)
			vp = m_object->handle;
	} else {
		KASSERT(VM_PAGE_IS_FREE(m),
		    ("vm_page_alloc: page %p is not free", m));
		KASSERT(m->valid == 0,
		    ("vm_page_alloc: free page %p is valid", m));
		cnt.v_free_count--;
	}

	/*
	 * Only the PG_ZERO flag is inherited.  The PG_CACHED or PG_FREE flag
	 * must be cleared before the free page queues lock is released.
	 */
	flags = 0;
	if (req & VM_ALLOC_NODUMP)
		flags |= PG_NODUMP;
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		if (req & VM_ALLOC_ZERO)
			flags = PG_ZERO;
	}
	m->flags = flags;
	mtx_unlock(&vm_page_queue_free_mtx);
	m->aflags = 0;
	if (object == NULL || object->type == OBJT_PHYS)
		m->oflags = VPO_UNMANAGED;
	else
		m->oflags = 0;
	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) == 0)
		m->oflags |= VPO_BUSY;
	if (req & VM_ALLOC_WIRED) {
		/*
		 * The page lock is not required for wiring a page until that
		 * page is inserted into the object.
		 */
		atomic_add_int(&cnt.v_wire_count, 1);
		m->wire_count = 1;
	}
	m->act_count = 0;

	if (object != NULL) {
		/* Ignore device objects; the pager sets "memattr" for them. */
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    object->type != OBJT_DEVICE && object->type != OBJT_SG)
			pmap_page_set_memattr(m, object->memattr);
		vm_page_insert(m, object, pindex);
	} else
		m->pindex = pindex;

	/*
	 * The following call to vdrop() must come after the above call
	 * to vm_page_insert() in case both affect the same object and
	 * vnode.  Otherwise, the affected vnode's hold count could
	 * temporarily become zero.
	 */
	if (vp != NULL)
		vdrop(vp);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	return (m);
}
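
/*
 * Editor's note (illustrative sketch, not part of revision 236920): since
 * vm_page_alloc() never sleeps, callers that can tolerate blocking pair it
 * with VM_WAIT and retry, dropping the object lock around the sleep:
 *
 *	while ((m = vm_page_alloc(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_ZERO)) == NULL) {
 *		VM_OBJECT_UNLOCK(object);
 *		VM_WAIT;
 *		VM_OBJECT_LOCK(object);
 *	}
 *	if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */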
/*
 * Initialize a page that has been freshly dequeued from a freelist.
 * The caller has to drop the vnode returned, if it is not NULL.
 *
 * To be called with vm_page_queue_free_mtx held.
 */
struct vnode *
vm_page_alloc_init(vm_page_t m)
{
	struct vnode *drop;
	vm_object_t m_object;

	KASSERT(m->queue == PQ_NONE,
	    ("vm_page_alloc_init: page %p has unexpected queue %d",
	    m, m->queue));
	KASSERT(m->wire_count == 0,
	    ("vm_page_alloc_init: page %p is wired", m));
	KASSERT(m->hold_count == 0,
	    ("vm_page_alloc_init: page %p is held", m));
	KASSERT(m->busy == 0,
	    ("vm_page_alloc_init: page %p is busy", m));
	KASSERT(m->dirty == 0,
	    ("vm_page_alloc_init: page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("vm_page_alloc_init: page %p has unexpected memattr %d",
	    m, pmap_page_get_memattr(m)));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	drop = NULL;
	if ((m->flags & PG_CACHED) != 0) {
		m->valid = 0;
		m_object = m->object;
		vm_page_cache_remove(m);
		if (m_object->type == OBJT_VNODE &&
		    m_object->cache == NULL)
			drop = m_object->handle;
	} else {
		KASSERT(VM_PAGE_IS_FREE(m),
		    ("vm_page_alloc_init: page %p is not free", m));
		KASSERT(m->valid == 0,
		    ("vm_page_alloc_init: free page %p is valid", m));
		cnt.v_free_count--;
	}
	if (m->flags & PG_ZERO)
		vm_page_zero_count--;
	/* Don't clear the PG_ZERO flag; we'll need it later. */
	m->flags &= PG_ZERO;
	m->aflags = 0;
	m->oflags = VPO_UNMANAGED;
	/* Unmanaged pages don't use "act_count". */
	return (drop);
}

/*
 * 	vm_page_alloc_freelist:
 *
 *	Allocate a page from the specified freelist.
 *	Only the ALLOC_CLASS values in req are honored, other request flags
 *	are ignored.
 */
vm_page_t
vm_page_alloc_freelist(int flind, int req)
{
	struct vnode *drop;
	vm_page_t m;
	int page_req;

	m = NULL;
	page_req = req & VM_ALLOC_CLASS_MASK;
	mtx_lock(&vm_page_queue_free_mtx);
	/*
	 * Do not allocate reserved pages unless the req has asked for it.
	 */
	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
	    (page_req == VM_ALLOC_SYSTEM &&
	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
	    (page_req == VM_ALLOC_INTERRUPT &&
	    cnt.v_free_count + cnt.v_cache_count > 0)) {
		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
	}
	if (m == NULL) {
		mtx_unlock(&vm_page_queue_free_mtx);
		return (NULL);
	}
	drop = vm_page_alloc_init(m);
	mtx_unlock(&vm_page_queue_free_mtx);
	if (drop)
		vdrop(drop);
	return (m);
}
/*
 *	vm_wait:	(also see VM_WAIT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called in various places before memory allocations.
 */
void
vm_wait(void)
{

	mtx_lock(&vm_page_queue_free_mtx);
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
		    PDROP | PSWP, "VMWait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
		    "vmwait", 0);
	}
}

/*
 *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called only in vm_fault so that processes page faulting
 *	  can be easily tracked.
 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 *	  processes will be able to grab memory first.  Do not change
 *	  this balance without careful testing first.
 */
void
vm_waitpfault(void)
{

	mtx_lock(&vm_page_queue_free_mtx);
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
	    "pfault", 0);
}

/*
 *	vm_page_requeue:
 *
 *	Move the given page to the tail of its present page queue.
 *
 *	The page queues must be locked.
 */
void
vm_page_requeue(vm_page_t m)
{
	struct vpgqueues *vpq;
	int queue;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	queue = m->queue;
	KASSERT(queue != PQ_NONE,
	    ("vm_page_requeue: page %p is not queued", m));
	vpq = &vm_page_queues[queue];
	TAILQ_REMOVE(&vpq->pl, m, pageq);
	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
}

/*
 *	vm_page_queue_remove:
 *
 *	Remove the given page from the specified queue.
 *
 *	The page and page queues must be locked.
 */
static __inline void
vm_page_queue_remove(int queue, vm_page_t m)
{
	struct vpgqueues *pq;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	vm_page_lock_assert(m, MA_OWNED);
	pq = &vm_page_queues[queue];
	TAILQ_REMOVE(&pq->pl, m, pageq);
	(*pq->cnt)--;
}

/*
 *	vm_pageq_remove:
 *
 *	Remove a page from its queue.
 *
 *	The given page must be locked.
 *	This routine may not block.
 */
void
vm_pageq_remove(vm_page_t m)
{
	int queue;

	vm_page_lock_assert(m, MA_OWNED);
	if ((queue = m->queue) != PQ_NONE) {
		vm_page_lock_queues();
		m->queue = PQ_NONE;
		vm_page_queue_remove(queue, m);
		vm_page_unlock_queues();
	}
}

/*
 *	vm_page_enqueue:
 *
 *	Add the given page to the specified queue.
 *
 *	The page queues must be locked.
 */
static void
vm_page_enqueue(int queue, vm_page_t m)
{
	struct vpgqueues *vpq;

	vpq = &vm_page_queues[queue];
	m->queue = queue;
	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
	++*vpq->cnt;
}
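
/*
 * Editor's note (illustrative sketch, not part of revision 236920): the
 * queue primitives above assume the global page queues lock, so a caller
 * moving a page to the tail of its present queue brackets the call itself:
 *
 *	vm_page_lock(m);
 *	vm_page_lock_queues();
 *	vm_page_requeue(m);
 *	vm_page_unlock_queues();
 *	vm_page_unlock(m);
 */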
/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *	Ensure that act_count is at least ACT_INIT but do not otherwise
 *	mess with it.
 *
 *	The page must be locked.
 *	This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
	int queue;

	vm_page_lock_assert(m, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((queue = m->queue) != PQ_ACTIVE) {
		if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			vm_page_lock_queues();
			if (queue != PQ_NONE)
				vm_page_queue_remove(queue, m);
			vm_page_enqueue(PQ_ACTIVE, m);
			vm_page_unlock_queues();
		} else
			KASSERT(queue == PQ_NONE,
			    ("vm_page_activate: wired page %p is queued", m));
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}
}

/*
 *	vm_page_free_wakeup:
 *
 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *	routine is called when a page has been added to the cache or free
 *	queues.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
static inline void
vm_page_free_wakeup(void)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	/*
	 * if pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed &&
	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * wakeup processes that are waiting on memory if we hit a
	 * high water mark. And wakeup scheduler process if we have
	 * lots of memory. this process will swapin processes.
	 */
	if (vm_pages_needed && !vm_page_count_min()) {
		vm_pages_needed = 0;
		wakeup(&cnt.v_free_count);
	}
}

/*
 *	vm_page_free_toq:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 *	This routine may not block.
 */

void
vm_page_free_toq(vm_page_t m)
{

	if ((m->oflags & VPO_UNMANAGED) == 0) {
		vm_page_lock_assert(m, MA_OWNED);
		KASSERT(!pmap_page_is_mapped(m),
		    ("vm_page_free_toq: freeing mapped page %p", m));
	}
	PCPU_INC(cnt.v_tfree);

	if (VM_PAGE_IS_FREE(m))
		panic("vm_page_free: freeing free page %p", m);
	else if (m->busy != 0)
		panic("vm_page_free: freeing busy page %p", m);

	/*
	 * unqueue, then remove page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0)
		vm_pageq_remove(m);
	vm_page_remove(m);

	/*
	 * If fictitious remove object association and
	 * return, otherwise delay object association removal.
	 */
	if ((m->flags & PG_FICTITIOUS) != 0) {
		return;
	}

	m->valid = 0;
	vm_page_undirty(m);

	if (m->wire_count != 0)
		panic("vm_page_free: freeing wired page %p", m);
	if (m->hold_count != 0) {
		m->flags &= ~PG_ZERO;
		vm_page_lock_queues();
		vm_page_enqueue(PQ_HOLD, m);
		vm_page_unlock_queues();
	} else {
		/*
		 * Restore the default memory attribute to the page.
		 */
		if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
			pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);

		/*
		 * Insert the page into the physical memory allocator's
		 * cache/free page queues.
		 */
		mtx_lock(&vm_page_queue_free_mtx);
		m->flags |= PG_FREE;
		cnt.v_free_count++;
#if VM_NRESERVLEVEL > 0
		if (!vm_reserv_free_page(m))
#else
		if (TRUE)
#endif
			vm_phys_free_pages(m, 0);
		if ((m->flags & PG_ZERO) != 0)
			++vm_page_zero_count;
		else
			vm_page_zero_idle_wakeup();
		vm_page_free_wakeup();
		mtx_unlock(&vm_page_queue_free_mtx);
	}
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	If the page is fictitious, then its wire count must remain one.
 *
 *	The page must be locked.
 *	This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{

	/*
	 * Only bump the wire statistics if the page is not already wired,
	 * and only unqueue the page if it is on some queue (if it is unmanaged
	 * it is already off the queues).
	 */
	vm_page_lock_assert(m, MA_OWNED);
	if ((m->flags & PG_FICTITIOUS) != 0) {
		KASSERT(m->wire_count == 1,
		    ("vm_page_wire: fictitious page %p's wire count isn't one",
		    m));
		return;
	}
	if (m->wire_count == 0) {
		if ((m->oflags & VPO_UNMANAGED) == 0)
			vm_pageq_remove(m);
		atomic_add_int(&cnt.v_wire_count, 1);
	}
	m->wire_count++;
	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
}

/*
 * vm_page_unwire:
 *
 * Release one wiring of the specified page, potentially enabling it to be
 * paged again.  If paging is enabled, then the value of the parameter
 * "activate" determines to which queue the page is added.  If "activate" is
 * non-zero, then the page is added to the active queue.  Otherwise, it is
 * added to the inactive queue.
 *
 * However, unless the page belongs to an object, it is not enqueued because
 * it cannot be paged out.
 *
 * If a page is fictitious, then its wire count must always be one.
 *
 * A managed page must be locked.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{

	if ((m->oflags & VPO_UNMANAGED) == 0)
		vm_page_lock_assert(m, MA_OWNED);
	if ((m->flags & PG_FICTITIOUS) != 0) {
		KASSERT(m->wire_count == 1,
		    ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
		return;
	}
	if (m->wire_count > 0) {
		m->wire_count--;
		if (m->wire_count == 0) {
			atomic_subtract_int(&cnt.v_wire_count, 1);
			if ((m->oflags & VPO_UNMANAGED) != 0 ||
			    m->object == NULL)
				return;
			if (!activate)
				m->flags &= ~PG_WINATCFLS;
			vm_page_lock_queues();
			vm_page_enqueue(activate ? PQ_ACTIVE : PQ_INACTIVE, m);
			vm_page_unlock_queues();
		}
	} else
		panic("vm_page_unwire: page %p's wire count is zero", m);
}
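
/*
 * Editor's note (illustrative sketch, not part of revision 236920): wiring
 * pins a page for the duration of an I/O or a long-lived kernel mapping;
 * the page lock brackets both transitions:
 *
 *	vm_page_lock(m);
 *	vm_page_wire(m);
 *	vm_page_unlock(m);
 *	... the page cannot be paged out or freed here ...
 *	vm_page_lock(m);
 *	vm_page_unwire(m, 0);	(0: requeue inactive, non-zero: active)
 *	vm_page_unlock(m);
 */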
/*
 * Move the specified page to the inactive queue.
 *
 * Many pages placed on the inactive queue should actually go
 * into the cache, but it is difficult to figure out which.  What
 * we do instead, if the inactive target is well met, is to put
 * clean pages at the head of the inactive queue instead of the tail.
 * This will cause them to be moved to the cache more quickly and
 * if not actively re-referenced, reclaimed more quickly.  If we just
 * stick these pages at the end of the inactive queue, heavy filesystem
 * meta-data accesses can cause an unnecessary paging load on memory bound
 * processes.  This optimization causes one-time-use metadata to be
 * reused more quickly.
 *
 * Normally athead is 0 resulting in LRU operation.  athead is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * This routine may not block.
 */
static inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
	int queue;

	vm_page_lock_assert(m, MA_OWNED);

	/*
	 * Ignore if already inactive.
	 */
	if ((queue = m->queue) == PQ_INACTIVE)
		return;
	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
		m->flags &= ~PG_WINATCFLS;
		vm_page_lock_queues();
		if (queue != PQ_NONE)
			vm_page_queue_remove(queue, m);
		if (athead)
			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m,
			    pageq);
		else
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m,
			    pageq);
		m->queue = PQ_INACTIVE;
		cnt.v_inactive_count++;
		vm_page_unlock_queues();
	}
}

/*
 * Move the specified page to the inactive queue.
 *
 * The page must be locked.
 */
void
vm_page_deactivate(vm_page_t m)
{

	_vm_page_deactivate(m, 0);
}

/*
 * vm_page_try_to_cache:
 *
 * Returns 0 on failure, 1 on success
 */
int
vm_page_try_to_cache(vm_page_t m)
{

	vm_page_lock_assert(m, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
		return (0);
	pmap_remove_all(m);
	if (m->dirty)
		return (0);
	vm_page_cache(m);
	return (1);
}

/*
 * vm_page_try_to_free()
 *
 *	Attempt to free the page.  If we cannot free it, we do nothing.
 *	1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{

	vm_page_lock_assert(m, MA_OWNED);
	if (m->object != NULL)
		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
		return (0);
	pmap_remove_all(m);
	if (m->dirty)
		return (0);
	vm_page_free(m);
	return (1);
}
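
/*
 * Editor's note (not part of revision 236920): both helpers above check
 * "m->dirty" a second time after pmap_remove_all() because removing the
 * last mapping transfers any modified bits latched in the pmap into the
 * machine-independent dirty field; a page that looked clean at the first
 * check may therefore turn out to be dirty once it is fully unmapped.
 */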
2105 */ 2106 vm_pageq_remove(m); 2107 2108 /* 2109 * Remove the page from the object's collection of resident 2110 * pages. 2111 */ 2112 if ((next = TAILQ_NEXT(m, listq)) != NULL && next->left == m) { 2113 /* 2114 * Since the page's successor in the list is also its parent 2115 * in the tree, its right subtree must be empty. 2116 */ 2117 next->left = m->left; 2118 KASSERT(m->right == NULL, 2119 ("vm_page_cache: page %p has right child", m)); 2120 } else if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL && 2121 prev->right == m) { 2122 /* 2123 * Since the page's predecessor in the list is also its parent 2124 * in the tree, its left subtree must be empty. 2125 */ 2126 KASSERT(m->left == NULL, 2127 ("vm_page_cache: page %p has left child", m)); 2128 prev->right = m->right; 2129 } else { 2130 if (m != object->root) 2131 vm_page_splay(m->pindex, object->root); 2132 if (m->left == NULL) 2133 root = m->right; 2134 else if (m->right == NULL) 2135 root = m->left; 2136 else { 2137 /* 2138 * Move the page's successor to the root, because 2139 * pages are usually removed in ascending order. 2140 */ 2141 if (m->right != next) 2142 vm_page_splay(m->pindex, m->right); 2143 next->left = m->left; 2144 root = next; 2145 } 2146 object->root = root; 2147 } 2148 TAILQ_REMOVE(&object->memq, m, listq); 2149 object->resident_page_count--; 2150 2151 /* 2152 * Restore the default memory attribute to the page. 2153 */ 2154 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 2155 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 2156 2157 /* 2158 * Insert the page into the object's collection of cached pages 2159 * and the physical memory allocator's cache/free page queues. 2160 */ 2161 m->flags &= ~PG_ZERO; 2162 mtx_lock(&vm_page_queue_free_mtx); 2163 m->flags |= PG_CACHED; 2164 cnt.v_cache_count++; 2165 root = object->cache; 2166 if (root == NULL) { 2167 m->left = NULL; 2168 m->right = NULL; 2169 } else { 2170 root = vm_page_splay(m->pindex, root); 2171 if (m->pindex < root->pindex) { 2172 m->left = root->left; 2173 m->right = root; 2174 root->left = NULL; 2175 } else if (__predict_false(m->pindex == root->pindex)) 2176 panic("vm_page_cache: offset already cached"); 2177 else { 2178 m->right = root->right; 2179 m->left = root; 2180 root->right = NULL; 2181 } 2182 } 2183 object->cache = m; 2184#if VM_NRESERVLEVEL > 0 2185 if (!vm_reserv_free_page(m)) { 2186#else 2187 if (TRUE) { 2188#endif 2189 vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0); 2190 vm_phys_free_pages(m, 0); 2191 } 2192 vm_page_free_wakeup(); 2193 mtx_unlock(&vm_page_queue_free_mtx); 2194 2195 /* 2196 * Increment the vnode's hold count if this is the object's only 2197 * cached page. Decrement the vnode's hold count if this was 2198 * the object's only resident page. 2199 */ 2200 if (object->type == OBJT_VNODE) { 2201 if (root == NULL && object->resident_page_count != 0) 2202 vhold(object->handle); 2203 else if (root != NULL && object->resident_page_count == 0) 2204 vdrop(object->handle); 2205 } 2206} 2207 2208/* 2209 * vm_page_dontneed 2210 * 2211 * Cache, deactivate, or do nothing as appropriate. This routine 2212 * is typically used by madvise() MADV_DONTNEED. 2213 * 2214 * Generally speaking we want to move the page into the cache so 2215 * it gets reused quickly. However, this can result in a silly syndrome 2216 * due to the page recycling too quickly. Small objects will not be 2217 * fully cached. 
On the other hand, if we move the page to the inactive 2218 * queue we wind up with a problem whereby very large objects 2219 * unnecessarily blow away our inactive and cache queues. 2220 * 2221 * The solution is to move the pages based on a fixed weighting.  We 2222 * either leave them alone, deactivate them, or move them to the cache, 2223 * where moving them to the cache has the highest weighting. 2224 * By forcing some pages into other queues we eventually force the 2225 * system to balance the queues, potentially recovering other unrelated 2226 * space from active.  The idea is to not force this to happen too 2227 * often. 2228 */ 2229void 2230vm_page_dontneed(vm_page_t m) 2231{ 2232	int dnw; 2233	int head; 2234 2235	vm_page_lock_assert(m, MA_OWNED); 2236	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2237	dnw = PCPU_GET(dnweight); 2238	PCPU_INC(dnweight); 2239 2240	/* 2241	 * Occasionally leave the page alone: the mask test below passes 2242	 * for 1 call in every 32 on a given CPU, when bits 4-8 of the 2243	 * per-CPU weight are all zero. 2244	 */ 2245	if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) { 2246	if (m->act_count >= ACT_INIT) 2247	--m->act_count; 2248	return; 2249	} 2250 2251	/* 2252	 * Clear any references to the page.  Otherwise, the page daemon will 2253	 * immediately reactivate the page. 2254	 * 2255	 * Perform the pmap_clear_reference() first.  Otherwise, a concurrent 2256	 * pmap operation, such as pmap_remove(), could clear a reference in 2257	 * the pmap and set PGA_REFERENCED on the page before the 2258	 * pmap_clear_reference() had completed.  Consequently, the page would 2259	 * appear referenced based upon an old reference that occurred before 2260	 * this function ran. 2261	 */ 2262	pmap_clear_reference(m); 2263	vm_page_aflag_clear(m, PGA_REFERENCED); 2264 2265	if (m->dirty == 0 && pmap_is_modified(m)) 2266	vm_page_dirty(m); 2267 2268	if (m->dirty || (dnw & 0x0070) == 0) { 2269	/* 2270	 * Deactivate the page 3 times out of 32: the 4/32 of calls 2271	 * with bits 4-6 of the weight clear, less the 1/32 already 2272	 * handled above. 2273	 */ 2274	head = 0; 2275	} else { 2276	/* 2277	 * Cache the page 28 times out of every 32.  Note that 2278	 * the page is deactivated instead of cached, but placed 2279	 * at the head of the queue instead of the tail. 2280	 */ 2281	head = 1; 2282	} 2283	_vm_page_deactivate(m, head); 2284}
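/*
 * Illustrative sketch (not part of the original file) of calling the
 * routine below: the object must be locked and VM_ALLOC_RETRY must be
 * passed, per the assertions in vm_page_grab():
 *
 *	VM_OBJECT_LOCK(object);
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *	... m is returned busied unless VM_ALLOC_NOBUSY was given ...
 *	VM_OBJECT_UNLOCK(object);
 */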
2285/* 2286 * Grab a page, waiting until we are woken up due to the page 2287 * changing state.  We keep on waiting if the page continues 2288 * to be in the object.  If the page doesn't exist, first allocate it 2289 * and then conditionally zero it. 2290 * 2291 * The caller must always specify the VM_ALLOC_RETRY flag.  This is intended 2292 * to facilitate its eventual removal. 2293 * 2294 * This routine may block. 2295 */ 2296vm_page_t 2297vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags) 2298{ 2299	vm_page_t m; 2300 2301	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2302	KASSERT((allocflags & VM_ALLOC_RETRY) != 0, 2303	("vm_page_grab: VM_ALLOC_RETRY is required")); 2304retrylookup: 2305	if ((m = vm_page_lookup(object, pindex)) != NULL) { 2306	if ((m->oflags & VPO_BUSY) != 0 || 2307	((allocflags & VM_ALLOC_IGN_SBUSY) == 0 && m->busy != 0)) { 2308	/* 2309	 * Reference the page before unlocking and 2310	 * sleeping so that the page daemon is less 2311	 * likely to reclaim it. 2312	 */ 2313	vm_page_aflag_set(m, PGA_REFERENCED); 2314	vm_page_sleep(m, "pgrbwt"); 2315	goto retrylookup; 2316	} else { 2317	if ((allocflags & VM_ALLOC_WIRED) != 0) { 2318	vm_page_lock(m); 2319	vm_page_wire(m); 2320	vm_page_unlock(m); 2321	} 2322	if ((allocflags & VM_ALLOC_NOBUSY) == 0) 2323	vm_page_busy(m); 2324	return (m); 2325	} 2326	} 2327	m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY | 2328	VM_ALLOC_IGN_SBUSY)); 2329	if (m == NULL) { 2330	VM_OBJECT_UNLOCK(object); 2331	VM_WAIT; 2332	VM_OBJECT_LOCK(object); 2333	goto retrylookup; 2334	} else if (m->valid != 0) 2335	return (m); 2336	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0) 2337	pmap_zero_page(m); 2338	return (m); 2339} 2340 2341/* 2342 * Mapping function for valid bits or for dirty bits in 2343 * a page.  May not block. 2344 * 2345 * Inputs are required to range within a page. 2346 */ 2347vm_page_bits_t 2348vm_page_bits(int base, int size) 2349{ 2350	int first_bit; 2351	int last_bit; 2352 2353	KASSERT( 2354	base + size <= PAGE_SIZE, 2355	("vm_page_bits: illegal base/size %d/%d", base, size) 2356	); 2357 2358	if (size == 0)		/* handle degenerate case */ 2359	return (0); 2360 2361	first_bit = base >> DEV_BSHIFT; 2362	last_bit = (base + size - 1) >> DEV_BSHIFT; 2363 2364	return (((vm_page_bits_t)2 << last_bit) - 2365	((vm_page_bits_t)1 << first_bit)); 2366} 2367 2368/* 2369 * vm_page_set_valid: 2370 * 2371 *	Sets portions of a page valid.  The arguments are expected 2372 *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive 2373 *	of any partial chunks touched by the range.  The invalid portion of 2374 *	such chunks will be zeroed. 2375 * 2376 *	(base + size) must be less than or equal to PAGE_SIZE. 2377 */ 2378void 2379vm_page_set_valid(vm_page_t m, int base, int size) 2380{ 2381	int endoff, frag; 2382 2383	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2384	if (size == 0)	/* handle degenerate case */ 2385	return; 2386 2387	/* 2388	 * If the base is not DEV_BSIZE aligned and the valid 2389	 * bit is clear, we have to zero out a portion of the 2390	 * first block. 2391	 */ 2392	if ((frag = base & ~(DEV_BSIZE - 1)) != base && 2393	(m->valid & (1 << (base >> DEV_BSHIFT))) == 0) 2394	pmap_zero_page_area(m, frag, base - frag); 2395 2396	/* 2397	 * If the ending offset is not DEV_BSIZE aligned and the 2398	 * valid bit is clear, we have to zero out a portion of 2399	 * the last block. 2400	 */ 2401	endoff = base + size; 2402	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff && 2403	(m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0) 2404	pmap_zero_page_area(m, endoff, 2405	DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 2406 2407	/* 2408	 * Assert that no previously invalid block that is now being validated 2409	 * is already dirty. 2410	 */ 2411	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0, 2412	("vm_page_set_valid: page %p is dirty", m)); 2413 2414	/* 2415	 * Set valid bits inclusive of any overlap. 2416	 */ 2417	m->valid |= vm_page_bits(base, size); 2418}
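/*
 * Illustrative note (not part of the original file): with DEV_BSIZE
 * == 512 (DEV_BSHIFT == 9), the vm_page_bits() mapping above yields,
 * for vm_page_bits(0, 1024), first_bit = 0 and last_bit = 1023 >> 9
 * = 1, and so returns (2 << 1) - (1 << 0) = 0x3, the mask for the
 * first two DEV_BSIZE chunks of the page; vm_page_bits(256, 1024)
 * likewise spans chunks 0 through 2 and returns 0x7.
 */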
2419/* 2420 * Clear the given bits from the specified page's dirty field. 2421 */ 2422static __inline void 2423vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits) 2424{ 2425	uintptr_t addr; 2426#if PAGE_SIZE < 16384 2427	int shift; 2428#endif 2429 2430	/* 2431	 * If the object is locked and the page is neither VPO_BUSY nor 2432	 * PGA_WRITEABLE, then the page's dirty field cannot possibly be 2433	 * set by a concurrent pmap operation. 2434	 */ 2435	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2436	if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0) 2437	m->dirty &= ~pagebits; 2438	else { 2439	/* 2440	 * The pmap layer can call vm_page_dirty() without 2441	 * holding a distinguished lock.  The combination of 2442	 * the object's lock and an atomic operation suffice 2443	 * to guarantee consistency of the page dirty field. 2444	 * 2445	 * For the PAGE_SIZE == 32768 case, the compiler already 2446	 * properly aligns the dirty field, so no forcible 2447	 * alignment is needed.  Only require existence of 2448	 * atomic_clear_64 when page size is 32768. 2449	 */ 2450	addr = (uintptr_t)&m->dirty; 2451#if PAGE_SIZE == 32768 2452	atomic_clear_64((uint64_t *)addr, pagebits); 2453#elif PAGE_SIZE == 16384 2454	atomic_clear_32((uint32_t *)addr, pagebits); 2455#else		/* PAGE_SIZE <= 8192 */ 2456	/* 2457	 * Use a trick to perform a 32-bit atomic on the 2458	 * containing aligned word, to not depend on the existence 2459	 * of atomic_clear_{8, 16}. 2460	 */ 2461	shift = addr & (sizeof(uint32_t) - 1); 2462#if BYTE_ORDER == BIG_ENDIAN 2463	shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY; 2464#else 2465	shift *= NBBY; 2466#endif 2467	addr &= ~(sizeof(uint32_t) - 1); 2468	atomic_clear_32((uint32_t *)addr, pagebits << shift); 2469#endif		/* PAGE_SIZE */ 2470	} 2471} 2472 2473/* 2474 * vm_page_set_validclean: 2475 * 2476 *	Sets portions of a page valid and clean.  The arguments are expected 2477 *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive 2478 *	of any partial chunks touched by the range.  The invalid portion of 2479 *	such chunks will be zeroed. 2480 * 2481 *	This routine may not block. 2482 * 2483 *	(base + size) must be less than or equal to PAGE_SIZE. 2484 */ 2485void 2486vm_page_set_validclean(vm_page_t m, int base, int size) 2487{ 2488	vm_page_bits_t oldvalid, pagebits; 2489	int endoff, frag; 2490 2491	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2492	if (size == 0)	/* handle degenerate case */ 2493	return; 2494 2495	/* 2496	 * If the base is not DEV_BSIZE aligned and the valid 2497	 * bit is clear, we have to zero out a portion of the 2498	 * first block. 2499	 */ 2500	if ((frag = base & ~(DEV_BSIZE - 1)) != base && 2501	(m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0) 2502	pmap_zero_page_area(m, frag, base - frag); 2503 2504	/* 2505	 * If the ending offset is not DEV_BSIZE aligned and the 2506	 * valid bit is clear, we have to zero out a portion of 2507	 * the last block. 2508	 */ 2509	endoff = base + size; 2510	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff && 2511	(m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0) 2512	pmap_zero_page_area(m, endoff, 2513	DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 2514 2515	/* 2516	 * Set valid, clear dirty bits.  If validating the entire 2517	 * page we can safely clear the pmap modify bit.  We also 2518	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process 2519	 * takes a write fault on a MAP_NOSYNC memory area the flag will 2520	 * be set again. 2521	 * 2522	 * We set valid bits inclusive of any overlap, but we can only 2523	 * clear dirty bits for DEV_BSIZE chunks that are fully within 2524	 * the range.
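	 * For example (an illustrative note, not in the original), with
	 * DEV_BSIZE == 512 a call with base == 256 and size == 1024 marks
	 * chunks 0-2 valid (zeroing any previously invalid partial pieces
	 * above), yet only chunk 1 lies entirely inside [256, 1280), so
	 * only its dirty bit may be cleared under that rule.  The
	 * narrowing computation itself is still disabled under the
	 * "#if 0 (NOT YET)" block below.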
2525	 */ 2526	oldvalid = m->valid; 2527	pagebits = vm_page_bits(base, size); 2528	m->valid |= pagebits; 2529#if 0	/* NOT YET */ 2530	if ((frag = base & (DEV_BSIZE - 1)) != 0) { 2531	frag = DEV_BSIZE - frag; 2532	base += frag; 2533	size -= frag; 2534	if (size < 0) 2535	size = 0; 2536	} 2537	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1)); 2538#endif 2539	if (base == 0 && size == PAGE_SIZE) { 2540	/* 2541	 * The page can only be modified within the pmap if it is 2542	 * mapped, and it can only be mapped if it was previously 2543	 * fully valid. 2544	 */ 2545	if (oldvalid == VM_PAGE_BITS_ALL) 2546	/* 2547	 * Perform the pmap_clear_modify() first.  Otherwise, 2548	 * a concurrent pmap operation, such as 2549	 * pmap_protect(), could clear a modification in the 2550	 * pmap and set the dirty field on the page before 2551	 * pmap_clear_modify() had begun and after the dirty 2552	 * field was cleared here. 2553	 */ 2554	pmap_clear_modify(m); 2555	m->dirty = 0; 2556	m->oflags &= ~VPO_NOSYNC; 2557	} else if (oldvalid != VM_PAGE_BITS_ALL) 2558	m->dirty &= ~pagebits; 2559	else 2560	vm_page_clear_dirty_mask(m, pagebits); 2561} 2562 2563void 2564vm_page_clear_dirty(vm_page_t m, int base, int size) 2565{ 2566 2567	vm_page_clear_dirty_mask(m, vm_page_bits(base, size)); 2568} 2569 2570/* 2571 * vm_page_set_invalid: 2572 * 2573 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the 2574 *	valid and dirty bits for the affected areas are cleared. 2575 * 2576 *	May not block. 2577 */ 2578void 2579vm_page_set_invalid(vm_page_t m, int base, int size) 2580{ 2581	vm_page_bits_t bits; 2582 2583	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2584	KASSERT((m->oflags & VPO_BUSY) == 0, 2585	("vm_page_set_invalid: page %p is busy", m)); 2586	bits = vm_page_bits(base, size); 2587	if (m->valid == VM_PAGE_BITS_ALL && bits != 0) 2588	pmap_remove_all(m); 2589	KASSERT(!pmap_page_is_mapped(m), 2590	("vm_page_set_invalid: page %p is mapped", m)); 2591	m->valid &= ~bits; 2592	m->dirty &= ~bits; 2593} 2594 2595/* 2596 * vm_page_zero_invalid() 2597 * 2598 *	The kernel assumes that the invalid portions of a page contain 2599 *	garbage, but such pages can be mapped into memory by user code. 2600 *	When this occurs, we must zero out the non-valid portions of the 2601 *	page so user code sees what it expects. 2602 * 2603 *	Pages are most often semi-valid when the end of a file is mapped 2604 *	into memory and the file's size is not page aligned. 2605 */ 2606void 2607vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) 2608{ 2609	int b; 2610	int i; 2611 2612	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2613	/* 2614	 * Scan the valid bits looking for invalid sections that 2615	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the 2616	 * valid bit may be set) have already been zeroed by 2617	 * vm_page_set_validclean(). 2618	 */ 2619	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) { 2620	if (i == (PAGE_SIZE / DEV_BSIZE) || 2621	(m->valid & ((vm_page_bits_t)1 << i))) { 2622	if (i > b) { 2623	pmap_zero_page_area(m, 2624	b << DEV_BSHIFT, (i - b) << DEV_BSHIFT); 2625	} 2626	b = i + 1; 2627	} 2628	} 2629 2630	/* 2631	 * setvalid is TRUE when we can safely set the zeroed areas 2632	 * as being valid.  We can do this if there are no cache consistency 2633	 * issues, e.g., it is ok to do with UFS, but not ok to do with NFS. 2634	 */ 2635	if (setvalid) 2636	m->valid = VM_PAGE_BITS_ALL; 2637}
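/*
 * Illustrative example (not part of the original file): with PAGE_SIZE
 * == 4096 and DEV_BSIZE == 512, the last page of a 5000 byte file holds
 * 904 valid bytes.  vm_page_set_validclean(m, 0, 904) marks chunks 0
 * and 1 valid and zeroes bytes 904-1023 of the partial second chunk;
 * the scan loop in vm_page_zero_invalid() then zeroes the six wholly
 * invalid chunks, bytes 1024-4095.
 */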
2638/* 2639 * vm_page_is_valid: 2640 * 2641 *	Is (partial) page valid?  Note that the case where size == 0 2642 *	will return FALSE in the degenerate case where the page is 2643 *	entirely invalid, and TRUE otherwise. 2644 * 2645 *	May not block. 2646 */ 2647int 2648vm_page_is_valid(vm_page_t m, int base, int size) 2649{ 2650	vm_page_bits_t bits; 2651 2652	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2653	bits = vm_page_bits(base, size); 2654	if (m->valid && ((m->valid & bits) == bits)) 2655	return (1); 2656	else 2657	return (0); 2658} 2659 2660/* 2661 * update dirty bits from pmap/mmu.  May not block. 2662 */ 2663void 2664vm_page_test_dirty(vm_page_t m) 2665{ 2666 2667	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2668	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m)) 2669	vm_page_dirty(m); 2670} 2671 2672void 2673vm_page_lock_KBI(vm_page_t m, const char *file, int line) 2674{ 2675 2676	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line); 2677} 2678 2679void 2680vm_page_unlock_KBI(vm_page_t m, const char *file, int line) 2681{ 2682 2683	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line); 2684} 2685 2686int 2687vm_page_trylock_KBI(vm_page_t m, const char *file, int line) 2688{ 2689 2690	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line)); 2691} 2692 2693#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) 2694void 2695vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line) 2696{ 2697 2698	mtx_assert_(vm_page_lockptr(m), a, file, line); 2699} 2700#endif 2701 2702int so_zerocp_fullpage = 0; 2703 2704/* 2705 * Replace the given page with a copy.  The copied page assumes 2706 * the portion of the given page's "wire_count" that is not the 2707 * responsibility of this copy-on-write mechanism. 2708 * 2709 * The object containing the given page must have a non-zero 2710 * paging-in-progress count and be locked. 2711 */ 2712void 2713vm_page_cowfault(vm_page_t m) 2714{ 2715	vm_page_t mnew; 2716	vm_object_t object; 2717	vm_pindex_t pindex; 2718 2719	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED); 2720	vm_page_lock_assert(m, MA_OWNED); 2721	object = m->object; 2722	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2723	KASSERT(object->paging_in_progress != 0, 2724	("vm_page_cowfault: object %p's paging-in-progress count is zero.", 2725	object)); 2726	pindex = m->pindex; 2727 2728 retry_alloc: 2729	pmap_remove_all(m); 2730	vm_page_remove(m); 2731	mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY); 2732	if (mnew == NULL) { 2733	vm_page_insert(m, object, pindex); 2734	vm_page_unlock(m); 2735	VM_OBJECT_UNLOCK(object); 2736	VM_WAIT; 2737	VM_OBJECT_LOCK(object); 2738	if (m == vm_page_lookup(object, pindex)) { 2739	vm_page_lock(m); 2740	goto retry_alloc; 2741	} else { 2742	/* 2743	 * Page disappeared during the wait. 2744	 */ 2745	return; 2746	} 2747	} 2748 2749	if (m->cow == 0) { 2750	/* 2751	 * check to see if we raced with an xmit complete when 2752	 * waiting to allocate a page.
If so, put things back 2752 * the way they were 2753 */ 2754 vm_page_unlock(m); 2755 vm_page_lock(mnew); 2756 vm_page_free(mnew); 2757 vm_page_unlock(mnew); 2758 vm_page_insert(m, object, pindex); 2759 } else { /* clear COW & copy page */ 2760 if (!so_zerocp_fullpage) 2761 pmap_copy_page(m, mnew); 2762 mnew->valid = VM_PAGE_BITS_ALL; 2763 vm_page_dirty(mnew); 2764 mnew->wire_count = m->wire_count - m->cow; 2765 m->wire_count = m->cow; 2766 vm_page_unlock(m); 2767 } 2768} 2769 2770void 2771vm_page_cowclear(vm_page_t m) 2772{ 2773 2774 vm_page_lock_assert(m, MA_OWNED); 2775 if (m->cow) { 2776 m->cow--; 2777 /* 2778 * let vm_fault add back write permission lazily 2779 */ 2780 } 2781 /* 2782 * sf_buf_free() will free the page, so we needn't do it here 2783 */ 2784} 2785 2786int 2787vm_page_cowsetup(vm_page_t m) 2788{ 2789 2790 vm_page_lock_assert(m, MA_OWNED); 2791 if ((m->flags & PG_FICTITIOUS) != 0 || 2792 (m->oflags & VPO_UNMANAGED) != 0 || 2793 m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object)) 2794 return (EBUSY); 2795 m->cow++; 2796 pmap_remove_write(m); 2797 VM_OBJECT_UNLOCK(m->object); 2798 return (0); 2799} 2800 2801#ifdef INVARIANTS 2802void 2803vm_page_object_lock_assert(vm_page_t m) 2804{ 2805 2806 /* 2807 * Certain of the page's fields may only be modified by the 2808 * holder of the containing object's lock or the setter of the 2809 * page's VPO_BUSY flag. Unfortunately, the setter of the 2810 * VPO_BUSY flag is not recorded, and thus cannot be checked 2811 * here. 2812 */ 2813 if (m->object != NULL && (m->oflags & VPO_BUSY) == 0) 2814 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2815} 2816#endif 2817 2818#include "opt_ddb.h" 2819#ifdef DDB 2820#include <sys/kernel.h> 2821 2822#include <ddb/ddb.h> 2823 2824DB_SHOW_COMMAND(page, vm_page_print_page_info) 2825{ 2826 db_printf("cnt.v_free_count: %d\n", cnt.v_free_count); 2827 db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count); 2828 db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count); 2829 db_printf("cnt.v_active_count: %d\n", cnt.v_active_count); 2830 db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count); 2831 db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved); 2832 db_printf("cnt.v_free_min: %d\n", cnt.v_free_min); 2833 db_printf("cnt.v_free_target: %d\n", cnt.v_free_target); 2834 db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min); 2835 db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target); 2836} 2837 2838DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info) 2839{ 2840 2841 db_printf("PQ_FREE:"); 2842 db_printf(" %d", cnt.v_free_count); 2843 db_printf("\n"); 2844 2845 db_printf("PQ_CACHE:"); 2846 db_printf(" %d", cnt.v_cache_count); 2847 db_printf("\n"); 2848 2849 db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n", 2850 *vm_page_queues[PQ_ACTIVE].cnt, 2851 *vm_page_queues[PQ_INACTIVE].cnt); 2852} 2853#endif /* DDB */ 2854
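/*
 * Illustrative note (not part of the original file): the
 * DB_SHOW_COMMAND() entries above are reached from the in-kernel
 * debugger prompt as "show page" and "show pageq", e.g.:
 *
 *	db> show page
 *	cnt.v_free_count: ...
 */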