vm_page.c revision 239040
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *			GENERAL RULES ON VM_PAGE MANIPULATION
 *
 *	- a pageq mutex is required when adding or removing a page from a
 *	  page queue (vm_page_queue[]), regardless of other mutexes or the
 *	  busy state of a page.
 *
 *	- The object mutex is held when inserting or removing
 *	  pages from an object (vm_page_insert() or vm_page_remove()).
 *
 */
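/*
 * Illustrative sketch (an assumption drawn from the rules above, not a
 * statement of them): a caller that moves a resident page between queues
 * typically takes the locks in this order, holding the page queues lock
 * only around the queue manipulation itself:
 *
 *	VM_OBJECT_LOCK(object);
 *	vm_page_lock(m);
 *	vm_page_lock_queues();
 *	... move m between vm_page_queues[] ...
 *	vm_page_unlock_queues();
 *	vm_page_unlock(m);
 *	VM_OBJECT_UNLOCK(object);
 */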
/*
 *			Resident memory management module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_page.c 239040 2012-08-04 18:16:43Z kib $");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct vpgqueues vm_page_queues[PQ_COUNT];
struct vpglocks vm_page_queue_lock;
struct vpglocks vm_page_queue_free_lock;

struct vpglocks	pa_lock[PA_LOCK_COUNT];

vm_page_t vm_page_array;
long vm_page_array_size;
long first_page;
int vm_page_zero_count;

static int boot_pages = UMA_BOOT_PAGES;
TUNABLE_INT("vm.boot_pages", &boot_pages);
SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
	"number of pages allocated for bootstrapping the VM system");

static int pa_tryrelock_restart;
SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
    &pa_tryrelock_restart, 0, "Number of tryrelock restarts");

static uma_zone_t fakepg_zone;

static struct vnode *vm_page_alloc_init(vm_page_t m);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_queue_remove(int queue, vm_page_t m);
static void vm_page_enqueue(int queue, vm_page_t m);
static void vm_page_init_fakepg(void *dummy);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);

static void
vm_page_init_fakepg(void *dummy)
{

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
}

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

/*
 * Try to acquire a physical address lock while a pmap is locked.  If we
 * fail to trylock we unlock and lock the pmap directly and cache the
 * locked pa in *locked.  The caller should then restart their loop in case
 * the virtual to physical mapping has changed.
 */
int
vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
{
	vm_paddr_t lockpa;

	lockpa = *locked;
	*locked = pa;
	if (lockpa) {
		PA_LOCK_ASSERT(lockpa, MA_OWNED);
		if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
			return (0);
		PA_UNLOCK(lockpa);
	}
	if (PA_TRYLOCK(pa))
		return (0);
	PMAP_UNLOCK(pmap);
	atomic_add_int(&pa_tryrelock_restart, 1);
	PA_LOCK(pa);
	PMAP_LOCK(pmap);
	return (EAGAIN);
}
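/*
 * Illustrative sketch of the intended calling pattern for
 * vm_page_pa_tryrelock() (assumed, not taken from this file): a pmap
 * routine restarts its lookup whenever the relock fails, because the
 * virtual-to-physical translation may have changed while both locks
 * were dropped:
 *
 *	retry:
 *		pa = pmap_extract(pmap, va);
 *		if (vm_page_pa_tryrelock(pmap, pa, &locked_pa))
 *			goto retry;
 *		... operate on the page at "pa" with its PA lock held ...
 */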
/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (cnt.v_page_size == 0)
		cnt.v_page_size = PAGE_SIZE;
	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 *	vm_page_blacklist_lookup:
 *
 *	See if a physical address in this page has been listed
 *	in the blacklist tunable.  Entries in the tunable are
 *	separated by spaces or commas.  If an invalid integer is
 *	encountered then the rest of the string is skipped.
 */
static int
vm_page_blacklist_lookup(char *list, vm_paddr_t pa)
{
	vm_paddr_t bad;
	char *cp, *pos;

	for (pos = list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp != '\0') {
			if (*cp == ' ' || *cp == ',') {
				cp++;
				if (cp == pos)
					continue;
			} else
				break;
		}
		if (pa == trunc_page(bad))
			return (1);
	}
	return (0);
}
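/*
 * Example (hypothetical addresses): setting
 *
 *	vm.blacklist="0x7fe00000,0x7fe02000"
 *
 * in loader.conf(5) would keep the pages containing those two physical
 * addresses off the free lists in vm_page_startup() below; spaces work
 * as separators as well as commas.
 */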
/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	vm_offset_t mapped;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	vm_paddr_t last_pa;
	char *list;

	/* the biggest memory array is the second group of pages */
	vm_paddr_t end;
	vm_paddr_t biggestsize;
	vm_paddr_t low_water, high_water;
	int biggestone;

	biggestsize = 0;
	biggestone = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	low_water = phys_avail[0];
	high_water = phys_avail[1];

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		if (phys_avail[i] < low_water)
			low_water = phys_avail[i];
		if (phys_avail[i + 1] > high_water)
			high_water = phys_avail[i + 1];
	}

#ifdef XEN
	low_water = 0;
#endif

	end = phys_avail[biggestone+1];

	/*
	 * Initialize the page and queue locks.
	 */
	mtx_init(&vm_page_queue_mtx, "vm page queue", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i].data, "vm page", NULL, MTX_DEF);

	/*
	 * Initialize the queue headers for the hold queue, the active queue,
	 * and the inactive queue.
	 */
	for (i = 0; i < PQ_COUNT; i++)
		TAILQ_INIT(&vm_page_queues[i].pl);
	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;

	/*
	 * Allocate memory for use when bootstrapping the kernel memory
	 * allocator.
	 */
	new_end = end - (boot_pages * UMA_SLAB_SIZE);
	new_end = trunc_page(new_end);
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, end - new_end);
	uma_startup((void *)mapped, boot_pages);

#if defined(__amd64__) || defined(__i386__) || defined(__arm__) || \
    defined(__mips__)
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	last_pa = 0;
	for (i = 0; dump_avail[i + 1] != 0; i += 2)
		if (dump_avail[i + 1] > last_pa)
			last_pa = dump_avail[i + 1];
	page_range = last_pa / PAGE_SIZE;
	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#endif
#ifdef __amd64__
	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = low_water / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	page_range = 0;
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
#elif defined(VM_PHYSSEG_DENSE)
	page_range = high_water / PAGE_SIZE - first_page;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
	end = new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 */
	vaddr += PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array = (vm_page_t) mapped;
#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate memory for the reservation management system's data
	 * structures.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
#endif
#if defined(__amd64__) || defined(__mips__)
	/*
	 * pmap_map on amd64 and mips can come out of the direct-map, not kvm
	 * like i386, so the pages must be tracked for a crashdump to include
	 * this data.  This includes the vm_page_array and the early UMA
	 * bootstrap pages.
	 */
	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	for (i = 0; i < page_range; i++)
		vm_page_array[i].order = VM_NFREEORDER;
	vm_page_array_size = page_range;

	/*
	 * Initialize the physical memory allocator.
	 */
	vm_phys_init();

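	/*
	 * Note: phys_avail[] is a zero-terminated array of {start, end}
	 * physical address pairs, so each range occupies two slots; that
	 * is why the loops in this function step by two and stop when
	 * phys_avail[i + 1] is zero.
	 */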
	/*
	 * Add every available physical page that is not blacklisted to
	 * the free lists.
	 */
	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	list = getenv("vm.blacklist");
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		pa = phys_avail[i];
		last_pa = phys_avail[i + 1];
		while (pa < last_pa) {
			if (list != NULL &&
			    vm_page_blacklist_lookup(list, pa))
				printf("Skipping page with pa 0x%jx\n",
				    (uintmax_t)pa);
			else
				vm_phys_add_page(pa);
			pa += PAGE_SIZE;
		}
	}
	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif
	return (vaddr);
}

void
vm_page_reference(vm_page_t m)
{

	vm_page_aflag_set(m, PGA_REFERENCED);
}

void
vm_page_busy(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("vm_page_busy: page already busy!!!"));
	m->oflags |= VPO_BUSY;
}

/*
 *	vm_page_flash:
 *
 *	wakeup anyone waiting for the page.
 */
void
vm_page_flash(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (m->oflags & VPO_WANTED) {
		m->oflags &= ~VPO_WANTED;
		wakeup(m);
	}
}

/*
 *	vm_page_wakeup:
 *
 *	clear the VPO_BUSY flag and wakeup anyone waiting for the
 *	page.
 *
 */
void
vm_page_wakeup(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
	m->oflags &= ~VPO_BUSY;
	vm_page_flash(m);
}

void
vm_page_io_start(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	m->busy++;
}

void
vm_page_io_finish(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT(m->busy > 0, ("vm_page_io_finish: page %p is not busy", m));
	m->busy--;
	if (m->busy == 0)
		vm_page_flash(m);
}

/*
 * Keep the page from being freed by the page daemon.  This has much the
 * same effect as wiring, except with much lower overhead, and should be
 * used only for *very* temporary holding ("wiring").
 */
void
vm_page_hold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	mem->hold_count++;
}

void
vm_page_unhold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
		vm_page_free_toq(mem);
}

/*
 *	vm_page_unhold_pages:
 *
 *	Unhold each of the pages that is referenced by the given array.
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{
	struct mtx *mtx, *new_mtx;

	mtx = NULL;
	for (; count != 0; count--) {
		/*
		 * Avoid releasing and reacquiring the same page lock.
		 */
		new_mtx = vm_page_lockptr(*ma);
		if (mtx != new_mtx) {
			if (mtx != NULL)
				mtx_unlock(mtx);
			mtx = new_mtx;
			mtx_lock(mtx);
		}
		vm_page_unhold(*ma);
		ma++;
	}
	if (mtx != NULL)
		mtx_unlock(mtx);
}
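/*
 * Note on the two "busy" mechanisms above: VPO_BUSY in m->oflags is an
 * exclusive busy bit set by vm_page_busy() and cleared by
 * vm_page_wakeup(), while m->busy counts in-progress I/O operations via
 * vm_page_io_start()/vm_page_io_finish().  vm_page_flash() wakes any
 * thread sleeping on the page in either case.
 */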
vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
	vm_page_t m;

#ifdef VM_PHYSSEG_SPARSE
	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		m = vm_phys_fictitious_to_vm_page(pa);
	return (m);
#elif defined(VM_PHYSSEG_DENSE)
	long pi;

	pi = atop(pa);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		m = &vm_page_array[pi - first_page];
		return (m);
	}
	return (vm_phys_fictitious_to_vm_page(pa));
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}

/*
 *	vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

void
vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	if ((m->flags & PG_FICTITIOUS) != 0) {
		/*
		 * The page's memattr might have changed since the
		 * previous initialization.  Update the pmap to the
		 * new memattr.
		 */
		goto memattr;
	}
	m->phys_addr = paddr;
	m->queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_BUSY | VPO_UNMANAGED;
	m->wire_count = 1;
memattr:
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_putfake:
 *
 *	Release a fictitious page.
 */
void
vm_page_putfake(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	uma_zfree(fakepg_zone, m);
}

/*
 *	vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_free:
 *
 *	Free a page.
 */
void
vm_page_free(vm_page_t m)
{

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue.
 */
void
vm_page_free_zero(vm_page_t m)
{

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);
}
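/*
 * Illustrative lifecycle of a fictitious page (a sketch; the memattr
 * value is only an example): a driver exporting device memory at "paddr"
 * might do
 *
 *	m = vm_page_getfake(paddr, VM_MEMATTR_UNCACHEABLE);
 *	... hand "m" to the fault handler ...
 *	vm_page_putfake(m);
 *
 * using vm_page_updatefake() if the same page structure must later be
 * retargeted at a different physical address.
 */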
/*
 * Unbusy and handle the page queueing for a page from the VOP_GETPAGES()
 * array which is not the requested page.
 */
void
vm_page_readahead_finish(vm_page_t m, int error)
{

	if (error == 0) {
		/*
		 * Since the page is not the requested page, whether
		 * it should be activated or deactivated is not
		 * obvious.  Empirical results have shown that
		 * deactivating the page is usually the best choice,
		 * unless the page is wanted by another thread.
		 */
		if (m->oflags & VPO_WANTED) {
			vm_page_lock(m);
			vm_page_activate(m);
			vm_page_unlock(m);
		} else {
			vm_page_lock(m);
			vm_page_deactivate(m);
			vm_page_unlock(m);
		}
		vm_page_wakeup(m);
	} else {
		vm_page_lock(m);
		vm_page_free(m);
		vm_page_unlock(m);
	}
}

/*
 *	vm_page_sleep:
 *
 *	Sleep and release the page and page queues locks.
 *
 *	The object containing the given page must be locked.
 */
void
vm_page_sleep(vm_page_t m, const char *msg)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (mtx_owned(&vm_page_queue_mtx))
		vm_page_unlock_queues();
	if (mtx_owned(vm_page_lockptr(m)))
		vm_page_unlock(m);

	/*
	 * It's possible that while we sleep, the page will get
	 * unbusied and freed.  If we are holding the object
	 * lock, we will assume we hold a reference to the object
	 * such that even if m->object changes, we can re-lock
	 * it.
	 */
	m->oflags |= VPO_WANTED;
	msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0);
}

/*
 *	vm_page_dirty_KBI:		[ internal use only ]
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 *
 *	This function should only be called by vm_page_dirty().
 */
void
vm_page_dirty_KBI(vm_page_t m)
{

	/* These assertions refer to this operation by its public name. */
	KASSERT((m->flags & PG_CACHED) == 0,
	    ("vm_page_dirty: page in cache!"));
	KASSERT(!VM_PAGE_IS_FREE(m),
	    ("vm_page_dirty: page is free!"));
	KASSERT(m->valid == VM_PAGE_BITS_ALL,
	    ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_splay:
 *
 *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
 *	the vm_page containing the given pindex.  If, however, that
 *	pindex is not found in the vm_object, returns a vm_page that is
 *	adjacent to the pindex, coming before or after it.
 */
vm_page_t
vm_page_splay(vm_pindex_t pindex, vm_page_t root)
{
	struct vm_page dummy;
	vm_page_t lefttreemax, righttreemin, y;

	if (root == NULL)
		return (root);
	lefttreemax = righttreemin = &dummy;
	for (;; root = y) {
		if (pindex < root->pindex) {
			if ((y = root->left) == NULL)
				break;
			if (pindex < y->pindex) {
				/* Rotate right. */
				root->left = y->right;
				y->right = root;
				root = y;
				if ((y = root->left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->left = root;
			righttreemin = root;
		} else if (pindex > root->pindex) {
			if ((y = root->right) == NULL)
				break;
			if (pindex > y->pindex) {
				/* Rotate left. */
				root->right = y->left;
				y->left = root;
				root = y;
				if ((y = root->right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->right = root;
			lefttreemax = root;
		} else
			break;
	}
	/* Assemble the new root. */
	lefttreemax->right = root->left;
	righttreemin->left = root->right;
	root->left = dummy.right;
	root->right = dummy.left;
	return (root);
}
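/*
 * Note on vm_page_splay(): splaying rotates the page nearest "pindex" to
 * the root, so a caller must store the returned page back into the
 * tree's root pointer, as vm_page_lookup() and the cache routines below
 * do.  Recently accessed indices stay near the root, giving the usual
 * amortized O(log n) cost of a splay tree.
 */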
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The pagetables are not updated but will presumably fault the page
 *	in if necessary, or if a kernel page the caller will at some point
 *	enter the page into the kernel's pmap.  We are not allowed to block
 *	here so we *can't* do this anyway.
 *
 *	The object and page must be locked.
 *	This routine may not block.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t root;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (m->object != NULL)
		panic("vm_page_insert: page already inserted");

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	root = object->root;
	if (root == NULL) {
		m->left = NULL;
		m->right = NULL;
		TAILQ_INSERT_TAIL(&object->memq, m, listq);
	} else {
		root = vm_page_splay(pindex, root);
		if (pindex < root->pindex) {
			m->left = root->left;
			m->right = root;
			root->left = NULL;
			TAILQ_INSERT_BEFORE(root, m, listq);
		} else if (pindex == root->pindex)
			panic("vm_page_insert: offset already allocated");
		else {
			m->right = root->right;
			m->left = root;
			root->right = NULL;
			TAILQ_INSERT_AFTER(&object->memq, root, m, listq);
		}
	}
	object->root = m;

	/*
	 * show that the object has one more resident page.
	 */
	object->resident_page_count++;
	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold((struct vnode *)object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_MIGHTBEDIRTY flag.
	 */
	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);
}
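/*
 * Note: an object's resident pages live in two structures kept in the
 * same pindex order, the splay tree rooted at object->root and the
 * object->memq list.  vm_page_insert() above relies on the splayed root
 * being adjacent to "pindex" in order to pick the correct list position
 * with TAILQ_INSERT_BEFORE()/TAILQ_INSERT_AFTER().
 */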
/*
 *	vm_page_remove:
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list, but do not invalidate/terminate
 *	the backing store.
 *
 *	The object and page must be locked.
 *	The underlying pmap entry (if any) is NOT removed here.
 *	This routine may not block.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t next, prev, root;

	if ((m->oflags & VPO_UNMANAGED) == 0)
		vm_page_lock_assert(m, MA_OWNED);
	if ((object = m->object) == NULL)
		return;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (m->oflags & VPO_BUSY) {
		m->oflags &= ~VPO_BUSY;
		vm_page_flash(m);
	}

	/*
	 * Now remove from the object's list of backed pages.
	 */
	if ((next = TAILQ_NEXT(m, listq)) != NULL && next->left == m) {
		/*
		 * Since the page's successor in the list is also its parent
		 * in the tree, its right subtree must be empty.
		 */
		next->left = m->left;
		KASSERT(m->right == NULL,
		    ("vm_page_remove: page %p has right child", m));
	} else if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
	    prev->right == m) {
		/*
		 * Since the page's predecessor in the list is also its parent
		 * in the tree, its left subtree must be empty.
		 */
		KASSERT(m->left == NULL,
		    ("vm_page_remove: page %p has left child", m));
		prev->right = m->right;
	} else {
		if (m != object->root)
			vm_page_splay(m->pindex, object->root);
		if (m->left == NULL)
			root = m->right;
		else if (m->right == NULL)
			root = m->left;
		else {
			/*
			 * Move the page's successor to the root, because
			 * pages are usually removed in ascending order.
			 */
			if (m->right != next)
				vm_page_splay(m->pindex, m->right);
			next->left = m->left;
			root = next;
		}
		object->root = root;
	}
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;
	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop((struct vnode *)object->handle);

	m->object = NULL;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 *	This routine may not block.
 *	This is a critical path routine
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if ((m = object->root) != NULL && m->pindex != pindex) {
		m = vm_page_splay(pindex, m);
		if ((object->root = m)->pindex != pindex)
			m = NULL;
	}
	return (m);
}

/*
 *	vm_page_find_least:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.
 *
 *	The object must be locked.
 *	The routine may not block.
 */
vm_page_t
vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if ((m = TAILQ_FIRST(&object->memq)) != NULL) {
		if (m->pindex < pindex) {
			m = vm_page_splay(pindex, object->root);
			if ((object->root = m)->pindex < pindex)
				m = TAILQ_NEXT(m, listq);
		}
	}
	return (m);
}

/*
 * Returns the given page's successor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((next = TAILQ_NEXT(m, listq)) != NULL &&
	    next->pindex != m->pindex + 1)
		next = NULL;
	return (next);
}

/*
 * Returns the given page's predecessor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_prev(vm_page_t m)
{
	vm_page_t prev;

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
	    prev->pindex != m->pindex - 1)
		prev = NULL;
	return (prev);
}
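/*
 * Illustrative sketch (not from this file) of the usual resident-page
 * iteration built on the routines above:
 *
 *	for (m = vm_page_find_least(object, start);
 *	    m != NULL && m->pindex < end;
 *	    m = TAILQ_NEXT(m, listq)) {
 *		... process one resident page in [start, end) ...
 *	}
 */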
/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 *	This routine may not block.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons:  (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.  If the page is on the cache, we have to deactivate it
 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	      on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{

	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	vm_page_dirty(m);
}

/*
 *	Convert all of the given object's cached pages that have a
 *	pindex within the given range into free pages.  If the value
 *	zero is given for "end", then the range's upper bound is
 *	infinity.  If the given object is backed by a vnode and it
 *	transitions from having one or more cached pages to none, the
 *	vnode's hold count is reduced.
 */
void
vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t m, m_next;
	boolean_t empty;

	mtx_lock(&vm_page_queue_free_mtx);
	if (__predict_false(object->cache == NULL)) {
		mtx_unlock(&vm_page_queue_free_mtx);
		return;
	}
	m = object->cache = vm_page_splay(start, object->cache);
	if (m->pindex < start) {
		if (m->right == NULL)
			m = NULL;
		else {
			m_next = vm_page_splay(start, m->right);
			m_next->left = m;
			m->right = NULL;
			m = object->cache = m_next;
		}
	}

	/*
	 * At this point, "m" is either (1) a reference to the page
	 * with the least pindex that is greater than or equal to
	 * "start" or (2) NULL.
	 */
	for (; m != NULL && (m->pindex < end || end == 0); m = m_next) {
		/*
		 * Find "m"'s successor and remove "m" from the
		 * object's cache.
		 */
		if (m->right == NULL) {
			object->cache = m->left;
			m_next = NULL;
		} else {
			m_next = vm_page_splay(start, m->right);
			m_next->left = m->left;
			object->cache = m_next;
		}
		/* Convert "m" to a free page. */
		m->object = NULL;
		m->valid = 0;
		/* Clear PG_CACHED and set PG_FREE. */
		m->flags ^= PG_CACHED | PG_FREE;
		KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
		    ("vm_page_cache_free: page %p has inconsistent flags", m));
		cnt.v_cache_count--;
		cnt.v_free_count++;
	}
	empty = object->cache == NULL;
	mtx_unlock(&vm_page_queue_free_mtx);
	if (object->type == OBJT_VNODE && empty)
		vdrop(object->handle);
}

/*
 *	Returns the cached page that is associated with the given
 *	object and offset.  If, however, none exists, returns NULL.
 *
 *	The free page queue must be locked.
 */
static inline vm_page_t
vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if ((m = object->cache) != NULL && m->pindex != pindex) {
		m = vm_page_splay(pindex, m);
		if ((object->cache = m)->pindex != pindex)
			m = NULL;
	}
	return (m);
}
/*
 *	Remove the given cached page from its containing object's
 *	collection of cached pages.
 *
 *	The free page queue must be locked.
 */
static void
vm_page_cache_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t root;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT((m->flags & PG_CACHED) != 0,
	    ("vm_page_cache_remove: page %p is not cached", m));
	object = m->object;
	if (m != object->cache) {
		root = vm_page_splay(m->pindex, object->cache);
		KASSERT(root == m,
		    ("vm_page_cache_remove: page %p is not cached in object %p",
		    m, object));
	}
	if (m->left == NULL)
		root = m->right;
	else if (m->right == NULL)
		root = m->left;
	else {
		root = vm_page_splay(m->pindex, m->left);
		root->right = m->right;
	}
	object->cache = root;
	m->object = NULL;
	cnt.v_cache_count--;
}

/*
 *	Transfer all of the cached pages with offset greater than or
 *	equal to 'offidxstart' from the original object's cache to the
 *	new object's cache.  However, any cached pages with offset
 *	greater than or equal to the new object's size are kept in the
 *	original object.  Initially, the new object's cache must be
 *	empty.  Offset 'offidxstart' in the original object must
 *	correspond to offset zero in the new object.
 *
 *	The new object must be locked.
 */
void
vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
    vm_object_t new_object)
{
	vm_page_t m, m_next;

	/*
	 * Insertion into an object's collection of cached pages
	 * requires the object to be locked.  In contrast, removal does
	 * not.
	 */
	VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
	KASSERT(new_object->cache == NULL,
	    ("vm_page_cache_transfer: object %p has cached pages",
	    new_object));
	mtx_lock(&vm_page_queue_free_mtx);
	if ((m = orig_object->cache) != NULL) {
		/*
		 * Transfer all of the pages with offset greater than or
		 * equal to 'offidxstart' from the original object's
		 * cache to the new object's cache.
		 */
		m = vm_page_splay(offidxstart, m);
		if (m->pindex < offidxstart) {
			orig_object->cache = m;
			new_object->cache = m->right;
			m->right = NULL;
		} else {
			orig_object->cache = m->left;
			new_object->cache = m;
			m->left = NULL;
		}
		while ((m = new_object->cache) != NULL) {
			if ((m->pindex - offidxstart) >= new_object->size) {
				/*
				 * Return all of the cached pages with
				 * offset greater than or equal to the
				 * new object's size to the original
				 * object's cache.
				 */
				new_object->cache = m->left;
				m->left = orig_object->cache;
				orig_object->cache = m;
				break;
			}
			m_next = vm_page_splay(m->pindex, m->right);
			/* Update the page's object and offset. */
			m->object = new_object;
			m->pindex -= offidxstart;
			if (m_next == NULL)
				break;
			m->right = NULL;
			m_next->left = m;
			new_object->cache = m_next;
		}
		KASSERT(new_object->cache == NULL ||
		    new_object->type == OBJT_SWAP,
		    ("vm_page_cache_transfer: object %p's type is incompatible"
		    " with cached pages", new_object));
	}
	mtx_unlock(&vm_page_queue_free_mtx);
}
/*
 * Returns TRUE if a cached page is associated with the given object and
 * offset, and FALSE otherwise.
 *
 * The object must be locked.
 */
boolean_t
vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	/*
	 * Insertion into an object's collection of cached pages requires the
	 * object to be locked.  Therefore, if the object is locked and the
	 * object's collection is empty, there is no need to acquire the free
	 * page queues lock in order to prove that the specified page doesn't
	 * exist.
	 */
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (__predict_true(object->cache == NULL))
		return (FALSE);
	mtx_lock(&vm_page_queue_free_mtx);
	m = vm_page_cache_lookup(object, pindex);
	mtx_unlock(&vm_page_queue_free_mtx);
	return (m != NULL);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a page that is associated with the specified
 *	object and offset pair.  By default, this page has the flag VPO_BUSY
 *	set.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_IFCACHED	return page only if it is cached
 *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
 *				is cached
 *	VM_ALLOC_NOBUSY		do not set the flag VPO_BUSY on the page
 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not have the flag VPO_BUSY set
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
	struct vnode *vp = NULL;
	vm_object_t m_object;
	vm_page_t m;
	int flags, req_class;

	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
	    ("vm_page_alloc: inconsistent object/req"));
	if (object != NULL)
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	mtx_lock(&vm_page_queue_free_mtx);
	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
	    (req_class == VM_ALLOC_SYSTEM &&
	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
	    (req_class == VM_ALLOC_INTERRUPT &&
	    cnt.v_free_count + cnt.v_cache_count > 0)) {
		/*
		 * Allocate from the free queue if the number of free pages
		 * exceeds the minimum for the request class.
		 */
		if (object != NULL &&
		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
				mtx_unlock(&vm_page_queue_free_mtx);
				return (NULL);
			}
			if (vm_phys_unfree_page(m))
				vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
#if VM_NRESERVLEVEL > 0
			else if (!vm_reserv_reactivate_page(m))
#else
			else
#endif
				panic("vm_page_alloc: cache page %p is missing"
				    " from the free queue", m);
		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
			mtx_unlock(&vm_page_queue_free_mtx);
			return (NULL);
#if VM_NRESERVLEVEL > 0
		} else if (object == NULL || object->type == OBJT_DEVICE ||
		    object->type == OBJT_SG ||
		    (object->flags & OBJ_COLORED) == 0 ||
		    (m = vm_reserv_alloc_page(object, pindex)) == NULL) {
#else
		} else {
#endif
			m = vm_phys_alloc_pages(object != NULL ?
			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
#if VM_NRESERVLEVEL > 0
			if (m == NULL && vm_reserv_reclaim_inactive()) {
				m = vm_phys_alloc_pages(object != NULL ?
				    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
				    0);
			}
#endif
		}
	} else {
		/*
		 * Not allocatable, give up.
		 */
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit,
		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 *  At this point we had better have found a good page.
	 */
	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
	KASSERT(m->queue == PQ_NONE,
	    ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
	KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
	KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
	KASSERT(m->busy == 0, ("vm_page_alloc: page %p is busy", m));
	KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
	    pmap_page_get_memattr(m)));
	if ((m->flags & PG_CACHED) != 0) {
		KASSERT((m->flags & PG_ZERO) == 0,
		    ("vm_page_alloc: cached page %p is PG_ZERO", m));
		KASSERT(m->valid != 0,
		    ("vm_page_alloc: cached page %p is invalid", m));
		if (m->object == object && m->pindex == pindex)
			cnt.v_reactivated++;
		else
			m->valid = 0;
		m_object = m->object;
		vm_page_cache_remove(m);
		if (m_object->type == OBJT_VNODE && m_object->cache == NULL)
			vp = m_object->handle;
	} else {
		KASSERT(VM_PAGE_IS_FREE(m),
		    ("vm_page_alloc: page %p is not free", m));
		KASSERT(m->valid == 0,
		    ("vm_page_alloc: free page %p is valid", m));
		cnt.v_free_count--;
	}

	/*
	 * Only the PG_ZERO flag is inherited.  The PG_CACHED or PG_FREE flag
	 * must be cleared before the free page queues lock is released.
	 */
	flags = 0;
	if (req & VM_ALLOC_NODUMP)
		flags |= PG_NODUMP;
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		if (req & VM_ALLOC_ZERO)
			flags = PG_ZERO;
	}
	m->flags = flags;
	mtx_unlock(&vm_page_queue_free_mtx);
	m->aflags = 0;
	if (object == NULL || object->type == OBJT_PHYS)
		m->oflags = VPO_UNMANAGED;
	else
		m->oflags = 0;
	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) == 0)
		m->oflags |= VPO_BUSY;
	if (req & VM_ALLOC_WIRED) {
		/*
		 * The page lock is not required for wiring a page until that
		 * page is inserted into the object.
		 */
		atomic_add_int(&cnt.v_wire_count, 1);
		m->wire_count = 1;
	}
	m->act_count = 0;

	if (object != NULL) {
		/* Ignore device objects; the pager sets "memattr" for them. */
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    object->type != OBJT_DEVICE && object->type != OBJT_SG)
			pmap_page_set_memattr(m, object->memattr);
		vm_page_insert(m, object, pindex);
	} else
		m->pindex = pindex;

	/*
	 * The following call to vdrop() must come after the above call
	 * to vm_page_insert() in case both affect the same object and
	 * vnode.  Otherwise, the affected vnode's hold count could
	 * temporarily become zero.
	 */
	if (vp != NULL)
		vdrop(vp);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	return (m);
}
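/*
 * Typical caller pattern (an illustrative sketch, not from this file):
 * because vm_page_alloc() may not sleep, an object-backed caller retries
 * after waiting for free pages, and must zero the page itself when
 * VM_ALLOC_ZERO merely expressed a preference:
 *
 *	while ((m = vm_page_alloc(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_ZERO)) == NULL) {
 *		VM_OBJECT_UNLOCK(object);
 *		VM_WAIT;
 *		VM_OBJECT_LOCK(object);
 *	}
 *	if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */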
/*
 *	vm_page_alloc_contig:
 *
 *	Allocate a contiguous set of physical pages of the given size "npages"
 *	from the free lists.  All of the physical pages must be at or above
 *	the given physical address "low" and below the given physical address
 *	"high".  The given value "alignment" determines the alignment of the
 *	first physical page in the set.  If the given value "boundary" is
 *	non-zero, then the set of physical pages cannot cross any physical
 *	address boundary that is a multiple of that value.  Both "alignment"
 *	and "boundary" must be a power of two.
 *
 *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
 *	then the memory attribute setting for the physical pages is configured
 *	to the object's memory attribute setting.  Otherwise, the memory
 *	attribute setting for the physical pages is configured to "memattr",
 *	overriding the object's memory attribute setting.  However, if the
 *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
 *	memory attribute setting for the physical pages cannot be configured
 *	to VM_MEMATTR_DEFAULT.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_NOBUSY		do not set the flag VPO_BUSY on the page
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not have the flag VPO_BUSY set
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr)
{
	struct vnode *drop;
	vm_page_t deferred_vdrop_list, m, m_ret;
	u_int flags, oflags;
	int req_class;

	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
	    ("vm_page_alloc_contig: inconsistent object/req"));
	if (object != NULL) {
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
		KASSERT(object->type == OBJT_PHYS,
		    ("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
		    object));
	}
	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	deferred_vdrop_list = NULL;
	mtx_lock(&vm_page_queue_free_mtx);
	if (cnt.v_free_count + cnt.v_cache_count >= npages +
	    cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
	    cnt.v_free_count + cnt.v_cache_count >= npages +
	    cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
	    cnt.v_free_count + cnt.v_cache_count >= npages)) {
#if VM_NRESERVLEVEL > 0
retry:
		if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
		    (m_ret = vm_reserv_alloc_contig(object, pindex, npages,
		    low, high, alignment, boundary)) == NULL)
#endif
			m_ret = vm_phys_alloc_contig(npages, low, high,
			    alignment, boundary);
	} else {
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit, npages);
		pagedaemon_wakeup();
		return (NULL);
	}
	if (m_ret != NULL)
		for (m = m_ret; m < &m_ret[npages]; m++) {
			drop = vm_page_alloc_init(m);
			if (drop != NULL) {
				/*
				 * Enqueue the vnode for deferred vdrop().
				 *
				 * Once the pages are removed from the free
				 * page list, "pageq" can be safely abused to
				 * construct a short-lived list of vnodes.
				 */
				m->pageq.tqe_prev = (void *)drop;
				m->pageq.tqe_next = deferred_vdrop_list;
				deferred_vdrop_list = m;
			}
		}
	else {
#if VM_NRESERVLEVEL > 0
		if (vm_reserv_reclaim_contig(npages, low, high, alignment,
		    boundary))
			goto retry;
#endif
	}
	mtx_unlock(&vm_page_queue_free_mtx);
	if (m_ret == NULL)
		return (NULL);

	/*
	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
	 */
	flags = 0;
	if ((req & VM_ALLOC_ZERO) != 0)
		flags = PG_ZERO;
	if ((req & VM_ALLOC_NODUMP) != 0)
		flags |= PG_NODUMP;
	if ((req & VM_ALLOC_WIRED) != 0)
		atomic_add_int(&cnt.v_wire_count, npages);
	oflags = VPO_UNMANAGED;
	if (object != NULL) {
		if ((req & VM_ALLOC_NOBUSY) == 0)
			oflags |= VPO_BUSY;
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    memattr == VM_MEMATTR_DEFAULT)
			memattr = object->memattr;
	}
	for (m = m_ret; m < &m_ret[npages]; m++) {
		m->aflags = 0;
		m->flags = (m->flags | PG_NODUMP) & flags;
		if ((req & VM_ALLOC_WIRED) != 0)
			m->wire_count = 1;
		/* Unmanaged pages don't use "act_count". */
		m->oflags = oflags;
		if (memattr != VM_MEMATTR_DEFAULT)
			pmap_page_set_memattr(m, memattr);
		if (object != NULL)
			vm_page_insert(m, object, pindex);
		else
			m->pindex = pindex;
		pindex++;
	}
	while (deferred_vdrop_list != NULL) {
		vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
		deferred_vdrop_list = deferred_vdrop_list->pageq.tqe_next;
	}
	if (vm_paging_needed())
		pagedaemon_wakeup();
	return (m_ret);
}

/*
 * Initialize a page that has been freshly dequeued from a freelist.
 * The caller has to drop the vnode returned, if it is not NULL.
 *
 * This function may only be used to initialize unmanaged pages.
 *
 * To be called with vm_page_queue_free_mtx held.
 */
static struct vnode *
vm_page_alloc_init(vm_page_t m)
{
	struct vnode *drop;
	vm_object_t m_object;

	KASSERT(m->queue == PQ_NONE,
	    ("vm_page_alloc_init: page %p has unexpected queue %d",
	    m, m->queue));
	KASSERT(m->wire_count == 0,
	    ("vm_page_alloc_init: page %p is wired", m));
	KASSERT(m->hold_count == 0,
	    ("vm_page_alloc_init: page %p is held", m));
	KASSERT(m->busy == 0,
	    ("vm_page_alloc_init: page %p is busy", m));
	KASSERT(m->dirty == 0,
	    ("vm_page_alloc_init: page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("vm_page_alloc_init: page %p has unexpected memattr %d",
	    m, pmap_page_get_memattr(m)));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	drop = NULL;
	if ((m->flags & PG_CACHED) != 0) {
		KASSERT((m->flags & PG_ZERO) == 0,
		    ("vm_page_alloc_init: cached page %p is PG_ZERO", m));
		m->valid = 0;
		m_object = m->object;
		vm_page_cache_remove(m);
		if (m_object->type == OBJT_VNODE && m_object->cache == NULL)
			drop = m_object->handle;
	} else {
		KASSERT(VM_PAGE_IS_FREE(m),
		    ("vm_page_alloc_init: page %p is not free", m));
		KASSERT(m->valid == 0,
		    ("vm_page_alloc_init: free page %p is valid", m));
		cnt.v_free_count--;
		if ((m->flags & PG_ZERO) != 0)
			vm_page_zero_count--;
	}
	/* Don't clear the PG_ZERO flag; we'll need it later. */
	m->flags &= PG_ZERO;
	return (drop);
}

/*
 *	vm_page_alloc_freelist:
 *
 *	Allocate a physical page from the specified free page list.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc_freelist(int flind, int req)
{
	struct vnode *drop;
	vm_page_t m;
	u_int flags;
	int req_class;

	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	/*
	 * Do not allocate reserved pages unless the req has asked for it.
	 */
	mtx_lock(&vm_page_queue_free_mtx);
	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
	    (req_class == VM_ALLOC_SYSTEM &&
	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
	    (req_class == VM_ALLOC_INTERRUPT &&
	    cnt.v_free_count + cnt.v_cache_count > 0))
		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
	else {
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit,
		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
		pagedaemon_wakeup();
		return (NULL);
	}
	if (m == NULL) {
		mtx_unlock(&vm_page_queue_free_mtx);
		return (NULL);
	}
	drop = vm_page_alloc_init(m);
	mtx_unlock(&vm_page_queue_free_mtx);

	/*
	 * Initialize the page.  Only the PG_ZERO flag is inherited.
	 */
	m->aflags = 0;
	flags = 0;
	if ((req & VM_ALLOC_ZERO) != 0)
		flags = PG_ZERO;
	m->flags &= flags;
	if ((req & VM_ALLOC_WIRED) != 0) {
		/*
		 * The page lock is not required for wiring a page that does
		 * not belong to an object.
		 */
		atomic_add_int(&cnt.v_wire_count, 1);
		m->wire_count = 1;
	}
	/* Unmanaged pages don't use "act_count". */
	m->oflags = VPO_UNMANAGED;
	if (drop != NULL)
		vdrop(drop);
	if (vm_paging_needed())
		pagedaemon_wakeup();
	return (m);
}

/*
 *	vm_wait:	(also see VM_WAIT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called in various places before memory allocations.
 */
void
vm_wait(void)
{

	mtx_lock(&vm_page_queue_free_mtx);
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
		    PDROP | PSWP, "VMWait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
		    "vmwait", 0);
	}
}

/*
 *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called only in vm_fault so that processes page faulting
 *	  can be easily tracked.
 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 *	  processes will be able to grab memory first.  Do not change
 *	  this balance without careful testing first.
 */
void
vm_waitpfault(void)
{

	mtx_lock(&vm_page_queue_free_mtx);
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
	    "pfault", 0);
}

/*
 *	vm_page_requeue:
 *
 *	Move the given page to the tail of its present page queue.
 *
 *	The page queues must be locked.
 */
void
vm_page_requeue(vm_page_t m)
{
	struct vpgqueues *vpq;
	int queue;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	queue = m->queue;
	KASSERT(queue != PQ_NONE,
	    ("vm_page_requeue: page %p is not queued", m));
	vpq = &vm_page_queues[queue];
	TAILQ_REMOVE(&vpq->pl, m, pageq);
	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
}
/*
 *	vm_page_queue_remove:
 *
 *	Remove the given page from the specified queue.
 *
 *	The page and page queues must be locked.
 */
static __inline void
vm_page_queue_remove(int queue, vm_page_t m)
{
	struct vpgqueues *pq;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	vm_page_lock_assert(m, MA_OWNED);
	pq = &vm_page_queues[queue];
	TAILQ_REMOVE(&pq->pl, m, pageq);
	(*pq->cnt)--;
}

/*
 *	vm_pageq_remove:
 *
 *	Remove a page from its queue.
 *
 *	The given page must be locked.
 *	This routine may not block.
 */
void
vm_pageq_remove(vm_page_t m)
{
	int queue;

	vm_page_lock_assert(m, MA_OWNED);
	if ((queue = m->queue) != PQ_NONE) {
		vm_page_lock_queues();
		m->queue = PQ_NONE;
		vm_page_queue_remove(queue, m);
		vm_page_unlock_queues();
	}
}

/*
 *	vm_page_enqueue:
 *
 *	Add the given page to the specified queue.
 *
 *	The page queues must be locked.
 */
static void
vm_page_enqueue(int queue, vm_page_t m)
{
	struct vpgqueues *vpq;

	vpq = &vm_page_queues[queue];
	m->queue = queue;
	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
	++*vpq->cnt;
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *	Ensure that act_count is at least ACT_INIT but do not otherwise
 *	mess with it.
 *
 *	The page must be locked.
 *	This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
	int queue;

	vm_page_lock_assert(m, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((queue = m->queue) != PQ_ACTIVE) {
		if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			vm_page_lock_queues();
			if (queue != PQ_NONE)
				vm_page_queue_remove(queue, m);
			vm_page_enqueue(PQ_ACTIVE, m);
			vm_page_unlock_queues();
		} else
			KASSERT(queue == PQ_NONE,
			    ("vm_page_activate: wired page %p is queued", m));
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}
}

/*
 *	vm_page_free_wakeup:
 *
 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *	routine is called when a page has been added to the cache or free
 *	queues.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
static inline void
vm_page_free_wakeup(void)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	/*
	 * if pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed &&
	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * wakeup processes that are waiting on memory if we hit a
	 * high water mark.  And wakeup scheduler process if we have
	 * lots of memory.  this process will swapin processes.
	 */
	if (vm_pages_needed && !vm_page_count_min()) {
		vm_pages_needed = 0;
		wakeup(&cnt.v_free_count);
	}
}

/*
 * vm_page_free_toq:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 *	This routine may not block.
 */
void
vm_page_free_toq(vm_page_t m)
{

        if ((m->oflags & VPO_UNMANAGED) == 0) {
                vm_page_lock_assert(m, MA_OWNED);
                KASSERT(!pmap_page_is_mapped(m),
                    ("vm_page_free_toq: freeing mapped page %p", m));
        }
        PCPU_INC(cnt.v_tfree);

        if (VM_PAGE_IS_FREE(m))
                panic("vm_page_free: freeing free page %p", m);
        else if (m->busy != 0)
                panic("vm_page_free: freeing busy page %p", m);

        /*
         * Unqueue, then remove page.  Note that we cannot destroy
         * the page here because we do not want to call the pager's
         * callback routine until after we've put the page on the
         * appropriate free queue.
         */
        if ((m->oflags & VPO_UNMANAGED) == 0)
                vm_pageq_remove(m);
        vm_page_remove(m);

        /*
         * If fictitious, remove the object association and return;
         * otherwise, delay object association removal.
         */
        if ((m->flags & PG_FICTITIOUS) != 0) {
                return;
        }

        m->valid = 0;
        vm_page_undirty(m);

        if (m->wire_count != 0)
                panic("vm_page_free: freeing wired page %p", m);
        if (m->hold_count != 0) {
                m->flags &= ~PG_ZERO;
                vm_page_lock_queues();
                vm_page_enqueue(PQ_HOLD, m);
                vm_page_unlock_queues();
        } else {
                /*
                 * Restore the default memory attribute to the page.
                 */
                if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
                        pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);

                /*
                 * Insert the page into the physical memory allocator's
                 * cache/free page queues.
                 */
                mtx_lock(&vm_page_queue_free_mtx);
                m->flags |= PG_FREE;
                cnt.v_free_count++;
#if VM_NRESERVLEVEL > 0
                if (!vm_reserv_free_page(m))
#else
                if (TRUE)
#endif
                        vm_phys_free_pages(m, 0);
                if ((m->flags & PG_ZERO) != 0)
                        ++vm_page_zero_count;
                else
                        vm_page_zero_idle_wakeup();
                vm_page_free_wakeup();
                mtx_unlock(&vm_page_queue_free_mtx);
        }
}

/*
 * vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	If the page is fictitious, then its wire count must remain one.
 *
 *	The page must be locked.
 *	This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{

        /*
         * Only bump the wire statistics if the page is not already wired,
         * and only unqueue the page if it is on some queue (if it is unmanaged
         * it is already off the queues).
         */
        vm_page_lock_assert(m, MA_OWNED);
        if ((m->flags & PG_FICTITIOUS) != 0) {
                KASSERT(m->wire_count == 1,
                    ("vm_page_wire: fictitious page %p's wire count isn't one",
                    m));
                return;
        }
        if (m->wire_count == 0) {
                if ((m->oflags & VPO_UNMANAGED) == 0)
                        vm_pageq_remove(m);
                atomic_add_int(&cnt.v_wire_count, 1);
        }
        m->wire_count++;
        KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
}
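
/*
 * Illustrative sketch, not compiled into the kernel: every
 * vm_page_wire() is expected to be balanced by a vm_page_unwire()
 * (below) once the caller is done with the page; the page lock covers
 * both calls for a managed page.  "m" is hypothetical.
 *
 *	vm_page_lock(m);
 *	vm_page_wire(m);
 *	vm_page_unlock(m);
 *	... use the pinned page ...
 *	vm_page_lock(m);
 *	vm_page_unwire(m, 0);	(0: enqueue on the inactive queue)
 *	vm_page_unlock(m);
 */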

/*
 * vm_page_unwire:
 *
 *	Release one wiring of the specified page, potentially enabling it to
 *	be paged again.  If paging is enabled, then the value of the parameter
 *	"activate" determines to which queue the page is added.  If "activate"
 *	is non-zero, then the page is added to the active queue.  Otherwise,
 *	it is added to the inactive queue.
 *
 *	However, unless the page belongs to an object, it is not enqueued
 *	because it cannot be paged out.
 *
 *	If a page is fictitious, then its wire count must always be one.
 *
 *	A managed page must be locked.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{

        if ((m->oflags & VPO_UNMANAGED) == 0)
                vm_page_lock_assert(m, MA_OWNED);
        if ((m->flags & PG_FICTITIOUS) != 0) {
                KASSERT(m->wire_count == 1,
                    ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
                return;
        }
        if (m->wire_count > 0) {
                m->wire_count--;
                if (m->wire_count == 0) {
                        atomic_subtract_int(&cnt.v_wire_count, 1);
                        if ((m->oflags & VPO_UNMANAGED) != 0 ||
                            m->object == NULL)
                                return;
                        if (!activate)
                                m->flags &= ~PG_WINATCFLS;
                        vm_page_lock_queues();
                        vm_page_enqueue(activate ? PQ_ACTIVE : PQ_INACTIVE, m);
                        vm_page_unlock_queues();
                }
        } else
                panic("vm_page_unwire: page %p's wire count is zero", m);
}

/*
 * Move the specified page to the inactive queue.
 *
 * Many pages placed on the inactive queue should actually go
 * into the cache, but it is difficult to figure out which.  What
 * we do instead, if the inactive target is well met, is to put
 * clean pages at the head of the inactive queue instead of the tail.
 * This will cause them to be moved to the cache more quickly and
 * if not actively re-referenced, reclaimed more quickly.  If we just
 * stick these pages at the end of the inactive queue, heavy filesystem
 * meta-data accesses can cause an unnecessary paging load on memory bound
 * processes.  This optimization causes one-time-use metadata to be
 * reused more quickly.
 *
 * Normally athead is 0 resulting in LRU operation.  athead is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * This routine may not block.
 */
static inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
        int queue;

        vm_page_lock_assert(m, MA_OWNED);

        /*
         * Ignore if already inactive.
         */
        if ((queue = m->queue) == PQ_INACTIVE)
                return;
        if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
                m->flags &= ~PG_WINATCFLS;
                vm_page_lock_queues();
                if (queue != PQ_NONE)
                        vm_page_queue_remove(queue, m);
                if (athead)
                        TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m,
                            pageq);
                else
                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m,
                            pageq);
                m->queue = PQ_INACTIVE;
                cnt.v_inactive_count++;
                vm_page_unlock_queues();
        }
}

/*
 * Move the specified page to the inactive queue.
 *
 * The page must be locked.
 */
void
vm_page_deactivate(vm_page_t m)
{

        _vm_page_deactivate(m, 0);
}

/*
 * vm_page_try_to_cache:
 *
 *	Returns 0 on failure, 1 on success
 */
int
vm_page_try_to_cache(vm_page_t m)
{

        vm_page_lock_assert(m, MA_OWNED);
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if (m->dirty || m->hold_count || m->busy || m->wire_count ||
            (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
                return (0);
        pmap_remove_all(m);
        if (m->dirty)
                return (0);
        vm_page_cache(m);
        return (1);
}
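
/*
 * Illustrative sketch, not compiled into the kernel: caching is
 * best-effort, so a caller holding the page and object locks (as the
 * assertions in vm_page_try_to_cache() demand) might fall back to
 * deactivation on failure.  "m" is hypothetical.
 *
 *	vm_page_lock(m);
 *	if (vm_page_try_to_cache(m) == 0)
 *		vm_page_deactivate(m);
 *	vm_page_unlock(m);
 */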

/*
 * vm_page_try_to_free()
 *
 *	Attempt to free the page.  If we cannot free it, we do nothing.
 *	1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{

        vm_page_lock_assert(m, MA_OWNED);
        if (m->object != NULL)
                VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if (m->dirty || m->hold_count || m->busy || m->wire_count ||
            (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
                return (0);
        pmap_remove_all(m);
        if (m->dirty)
                return (0);
        vm_page_free(m);
        return (1);
}

/*
 * vm_page_cache
 *
 *	Put the specified page onto the page cache queue (if appropriate).
 *
 *	This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{
        vm_object_t object;
        vm_page_t next, prev, root;

        vm_page_lock_assert(m, MA_OWNED);
        object = m->object;
        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy ||
            m->hold_count || m->wire_count)
                panic("vm_page_cache: attempting to cache busy page");
        pmap_remove_all(m);
        if (m->dirty != 0)
                panic("vm_page_cache: page %p is dirty", m);
        if (m->valid == 0 || object->type == OBJT_DEFAULT ||
            (object->type == OBJT_SWAP &&
            !vm_pager_has_page(object, m->pindex, NULL, NULL))) {
                /*
                 * Hypothesis: A cache-eligible page belonging to a
                 * default object or swap object but without a backing
                 * store must be zero filled.
                 */
                vm_page_free(m);
                return;
        }
        KASSERT((m->flags & PG_CACHED) == 0,
            ("vm_page_cache: page %p is already cached", m));
        PCPU_INC(cnt.v_tcached);

        /*
         * Remove the page from the paging queues.
         */
        vm_pageq_remove(m);

        /*
         * Remove the page from the object's collection of resident
         * pages.
         */
        if ((next = TAILQ_NEXT(m, listq)) != NULL && next->left == m) {
                /*
                 * Since the page's successor in the list is also its parent
                 * in the tree, its right subtree must be empty.
                 */
                next->left = m->left;
                KASSERT(m->right == NULL,
                    ("vm_page_cache: page %p has right child", m));
        } else if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
            prev->right == m) {
                /*
                 * Since the page's predecessor in the list is also its parent
                 * in the tree, its left subtree must be empty.
                 */
                KASSERT(m->left == NULL,
                    ("vm_page_cache: page %p has left child", m));
                prev->right = m->right;
        } else {
                if (m != object->root)
                        vm_page_splay(m->pindex, object->root);
                if (m->left == NULL)
                        root = m->right;
                else if (m->right == NULL)
                        root = m->left;
                else {
                        /*
                         * Move the page's successor to the root, because
                         * pages are usually removed in ascending order.
                         */
                        if (m->right != next)
                                vm_page_splay(m->pindex, m->right);
                        next->left = m->left;
                        root = next;
                }
                object->root = root;
        }
        TAILQ_REMOVE(&object->memq, m, listq);
        object->resident_page_count--;

        /*
         * Restore the default memory attribute to the page.
         */
        if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
                pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);

        /*
         * Insert the page into the object's collection of cached pages
         * and the physical memory allocator's cache/free page queues.
         */
        m->flags &= ~PG_ZERO;
        mtx_lock(&vm_page_queue_free_mtx);
        m->flags |= PG_CACHED;
        cnt.v_cache_count++;
        root = object->cache;
        if (root == NULL) {
                m->left = NULL;
                m->right = NULL;
        } else {
                root = vm_page_splay(m->pindex, root);
                if (m->pindex < root->pindex) {
                        m->left = root->left;
                        m->right = root;
                        root->left = NULL;
                } else if (__predict_false(m->pindex == root->pindex))
                        panic("vm_page_cache: offset already cached");
                else {
                        m->right = root->right;
                        m->left = root;
                        root->right = NULL;
                }
        }
        object->cache = m;
#if VM_NRESERVLEVEL > 0
        if (!vm_reserv_free_page(m)) {
#else
        if (TRUE) {
#endif
                vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0);
                vm_phys_free_pages(m, 0);
        }
        vm_page_free_wakeup();
        mtx_unlock(&vm_page_queue_free_mtx);

        /*
         * Increment the vnode's hold count if this is the object's only
         * cached page.  Decrement the vnode's hold count if this was
         * the object's only resident page.
         */
        if (object->type == OBJT_VNODE) {
                if (root == NULL && object->resident_page_count != 0)
                        vhold(object->handle);
                else if (root != NULL && object->resident_page_count == 0)
                        vdrop(object->handle);
        }
}

/*
 * vm_page_dontneed
 *
 *	Cache, deactivate, or do nothing as appropriate.  This routine
 *	is typically used by madvise() MADV_DONTNEED.
 *
 *	Generally speaking we want to move the page into the cache so
 *	it gets reused quickly.  However, this can result in a silly syndrome
 *	due to the page recycling too quickly.  Small objects will not be
 *	fully cached.  On the other hand, if we move the page to the inactive
 *	queue we wind up with a problem whereby very large objects
 *	unnecessarily blow away our inactive and cache queues.
 *
 *	The solution is to move the pages based on a fixed weighting.  We
 *	either leave them alone, deactivate them, or move them to the cache,
 *	where moving them to the cache has the highest weighting.
 *	By forcing some pages into other queues we eventually force the
 *	system to balance the queues, potentially recovering other unrelated
 *	space from active.  The idea is to not force this to happen too
 *	often.
 */
void
vm_page_dontneed(vm_page_t m)
{
        int dnw;
        int head;

        vm_page_lock_assert(m, MA_OWNED);
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        dnw = PCPU_GET(dnweight);
        PCPU_INC(dnweight);

        /*
         * Occasionally leave the page alone.
         */
        if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) {
                if (m->act_count >= ACT_INIT)
                        --m->act_count;
                return;
        }

        /*
         * Clear any references to the page.  Otherwise, the page daemon will
         * immediately reactivate the page.
         *
         * Perform the pmap_clear_reference() first.  Otherwise, a concurrent
         * pmap operation, such as pmap_remove(), could clear a reference in
         * the pmap and set PGA_REFERENCED on the page before the
         * pmap_clear_reference() had completed.  Consequently, the page would
         * appear referenced based upon an old reference that occurred before
         * this function ran.
         */
        pmap_clear_reference(m);
        vm_page_aflag_clear(m, PGA_REFERENCED);

        if (m->dirty == 0 && pmap_is_modified(m))
                vm_page_dirty(m);

        if (m->dirty || (dnw & 0x0070) == 0) {
                /*
                 * Deactivate the page 3 times out of 32.
                 */
                head = 0;
        } else {
                /*
                 * Cache the page 28 times out of every 32.  Note that
                 * the page is deactivated instead of cached, but placed
                 * at the head of the queue instead of the tail.
                 */
                head = 1;
        }
        _vm_page_deactivate(m, head);
}

/*
 * Grab a page, waiting until we are woken up due to the page
 * changing state.  We keep on waiting as long as the page continues
 * to be in the object.  If the page doesn't exist, first allocate it
 * and then conditionally zero it.
 *
 * The caller must always specify the VM_ALLOC_RETRY flag.  This is intended
 * to facilitate its eventual removal.
 *
 * This routine may block.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
        vm_page_t m;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        KASSERT((allocflags & VM_ALLOC_RETRY) != 0,
            ("vm_page_grab: VM_ALLOC_RETRY is required"));
retrylookup:
        if ((m = vm_page_lookup(object, pindex)) != NULL) {
                if ((m->oflags & VPO_BUSY) != 0 ||
                    ((allocflags & VM_ALLOC_IGN_SBUSY) == 0 && m->busy != 0)) {
                        /*
                         * Reference the page before unlocking and
                         * sleeping so that the page daemon is less
                         * likely to reclaim it.
                         */
                        vm_page_aflag_set(m, PGA_REFERENCED);
                        vm_page_sleep(m, "pgrbwt");
                        goto retrylookup;
                } else {
                        if ((allocflags & VM_ALLOC_WIRED) != 0) {
                                vm_page_lock(m);
                                vm_page_wire(m);
                                vm_page_unlock(m);
                        }
                        if ((allocflags & VM_ALLOC_NOBUSY) == 0)
                                vm_page_busy(m);
                        return (m);
                }
        }
        m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY |
            VM_ALLOC_IGN_SBUSY));
        if (m == NULL) {
                VM_OBJECT_UNLOCK(object);
                VM_WAIT;
                VM_OBJECT_LOCK(object);
                goto retrylookup;
        } else if (m->valid != 0)
                return (m);
        if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
                pmap_zero_page(m);
        return (m);
}

/*
 * Mapping function for valid bits or for dirty bits in
 * a page.  May not block.
 *
 * Inputs are required to range within a page.
 */
vm_page_bits_t
vm_page_bits(int base, int size)
{
        int first_bit;
        int last_bit;

        KASSERT(
            base + size <= PAGE_SIZE,
            ("vm_page_bits: illegal base/size %d/%d", base, size)
        );

        if (size == 0)		/* handle degenerate case */
                return (0);

        first_bit = base >> DEV_BSHIFT;
        last_bit = (base + size - 1) >> DEV_BSHIFT;

        return (((vm_page_bits_t)2 << last_bit) -
            ((vm_page_bits_t)1 << first_bit));
}
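
/*
 * Worked example, assuming DEV_BSIZE is 512 (DEV_BSHIFT == 9):
 * vm_page_bits(512, 1024) covers the 512-byte chunks at indices 1 and
 * 2, so first_bit = 1 and last_bit = (512 + 1024 - 1) >> 9 = 2.  The
 * result is (2 << 2) - (1 << 1) = 8 - 2 = 6, i.e. bits 1 and 2 set.
 */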

/*
 * vm_page_set_valid_range:
 *
 *	Sets portions of a page valid.  The arguments are expected
 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 *	of any partial chunks touched by the range.  The invalid portion of
 *	such chunks will be zeroed.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_valid_range(vm_page_t m, int base, int size)
{
        int endoff, frag;

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if (size == 0)		/* handle degenerate case */
                return;

        /*
         * If the base is not DEV_BSIZE aligned and the valid
         * bit is clear, we have to zero out a portion of the
         * first block.
         */
        if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
            (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
                pmap_zero_page_area(m, frag, base - frag);

        /*
         * If the ending offset is not DEV_BSIZE aligned and the
         * valid bit is clear, we have to zero out a portion of
         * the last block.
         */
        endoff = base + size;
        if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
            (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
                pmap_zero_page_area(m, endoff,
                    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

        /*
         * Assert that no previously invalid block that is now being validated
         * is already dirty.
         */
        KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
            ("vm_page_set_valid_range: page %p is dirty", m));

        /*
         * Set valid bits inclusive of any overlap.
         */
        m->valid |= vm_page_bits(base, size);
}

/*
 * Clear the given bits from the specified page's dirty field.
 */
static __inline void
vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
{
        uintptr_t addr;
#if PAGE_SIZE < 16384
        int shift;
#endif

        /*
         * If the object is locked and the page is neither VPO_BUSY nor
         * write mapped, then the page's dirty field cannot possibly be
         * set by a concurrent pmap operation.
         */
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if ((m->oflags & VPO_BUSY) == 0 && !pmap_page_is_write_mapped(m))
                m->dirty &= ~pagebits;
        else {
                /*
                 * The pmap layer can call vm_page_dirty() without
                 * holding a distinguished lock.  The combination of
                 * the object's lock and an atomic operation suffice
                 * to guarantee consistency of the page dirty field.
                 *
                 * For the PAGE_SIZE == 32768 case, the compiler already
                 * properly aligns the dirty field, so no forcible
                 * alignment is needed.  Only require the existence of
                 * atomic_clear_64 when the page size is 32768.
                 */
                addr = (uintptr_t)&m->dirty;
#if PAGE_SIZE == 32768
                atomic_clear_64((uint64_t *)addr, pagebits);
#elif PAGE_SIZE == 16384
                atomic_clear_32((uint32_t *)addr, pagebits);
#else		/* PAGE_SIZE <= 8192 */
                /*
                 * Use a trick to perform a 32-bit atomic on the
                 * containing aligned word, to not depend on the existence
                 * of atomic_clear_{8, 16}.
                 */
                shift = addr & (sizeof(uint32_t) - 1);
#if BYTE_ORDER == BIG_ENDIAN
                shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
#else
                shift *= NBBY;
#endif
                addr &= ~(sizeof(uint32_t) - 1);
                atomic_clear_32((uint32_t *)addr, pagebits << shift);
#endif		/* PAGE_SIZE */
        }
}
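
/*
 * Worked example for the sub-word atomic above, assuming a 4K page
 * (so "dirty" is a uint8_t) on a little-endian machine: if the dirty
 * field happens to sit at byte offset 2 within its aligned 32-bit
 * word, then shift = 2 * NBBY = 16, and clearing pagebits 0x03
 * becomes atomic_clear_32(word, 0x03 << 16), which touches only the
 * dirty byte.
 */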

/*
 * vm_page_set_validclean:
 *
 *	Sets portions of a page valid and clean.  The arguments are expected
 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 *	of any partial chunks touched by the range.  The invalid portion of
 *	such chunks will be zeroed.
 *
 *	This routine may not block.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
        vm_page_bits_t oldvalid, pagebits;
        int endoff, frag;

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if (size == 0)		/* handle degenerate case */
                return;

        /*
         * If the base is not DEV_BSIZE aligned and the valid
         * bit is clear, we have to zero out a portion of the
         * first block.
         */
        if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
            (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
                pmap_zero_page_area(m, frag, base - frag);

        /*
         * If the ending offset is not DEV_BSIZE aligned and the
         * valid bit is clear, we have to zero out a portion of
         * the last block.
         */
        endoff = base + size;
        if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
            (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
                pmap_zero_page_area(m, endoff,
                    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

        /*
         * Set valid, clear dirty bits.  If validating the entire
         * page we can safely clear the pmap modify bit.  We also
         * use this opportunity to clear the VPO_NOSYNC flag.  If a process
         * takes a write fault on a MAP_NOSYNC memory area the flag will
         * be set again.
         *
         * We set valid bits inclusive of any overlap, but we can only
         * clear dirty bits for DEV_BSIZE chunks that are fully within
         * the range.
         */
        oldvalid = m->valid;
        pagebits = vm_page_bits(base, size);
        m->valid |= pagebits;
#if 0	/* NOT YET */
        if ((frag = base & (DEV_BSIZE - 1)) != 0) {
                frag = DEV_BSIZE - frag;
                base += frag;
                size -= frag;
                if (size < 0)
                        size = 0;
        }
        pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
#endif
        if (base == 0 && size == PAGE_SIZE) {
                /*
                 * The page can only be modified within the pmap if it is
                 * mapped, and it can only be mapped if it was previously
                 * fully valid.
                 */
                if (oldvalid == VM_PAGE_BITS_ALL)
                        /*
                         * Perform the pmap_clear_modify() first.  Otherwise,
                         * a concurrent pmap operation, such as
                         * pmap_protect(), could clear a modification in the
                         * pmap and set the dirty field on the page before
                         * pmap_clear_modify() had begun and after the dirty
                         * field was cleared here.
                         */
                        pmap_clear_modify(m);
                m->dirty = 0;
                m->oflags &= ~VPO_NOSYNC;
        } else if (oldvalid != VM_PAGE_BITS_ALL)
                m->dirty &= ~pagebits;
        else
                vm_page_clear_dirty_mask(m, pagebits);
}

void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{

        vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
}
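
/*
 * Worked example for vm_page_set_validclean() above, assuming
 * DEV_BSIZE is 512: a call with base = 100 and size = 1000 spans
 * offsets 100-1099.  Byte 100 is not chunk aligned, so if chunk 0 is
 * invalid, bytes 0-99 are zeroed first; likewise, if chunk 2 is
 * invalid, bytes 1100-1535 are zeroed.  vm_page_bits(100, 1000) then
 * yields bits 0-2, so chunks 0 through 2 all become valid.
 */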

/*
 * vm_page_set_invalid:
 *
 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
 *	valid and dirty bits for the affected areas are cleared.
 *
 *	May not block.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
        vm_page_bits_t bits;

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        KASSERT((m->oflags & VPO_BUSY) == 0,
            ("vm_page_set_invalid: page %p is busy", m));
        bits = vm_page_bits(base, size);
        if (m->valid == VM_PAGE_BITS_ALL && bits != 0)
                pmap_remove_all(m);
        KASSERT(!pmap_page_is_mapped(m),
            ("vm_page_set_invalid: page %p is mapped", m));
        m->valid &= ~bits;
        m->dirty &= ~bits;
}

/*
 * vm_page_zero_invalid()
 *
 *	The kernel assumes that the invalid portions of a page contain
 *	garbage, but such pages can be mapped into memory by user code.
 *	When this occurs, we must zero out the non-valid portions of the
 *	page so user code sees what it expects.
 *
 *	Pages are most often semi-valid when the end of a file is mapped
 *	into memory and the file's size is not page aligned.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
        int b;
        int i;

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        /*
         * Scan the valid bits looking for invalid sections that
         * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
         * valid bit may be set) have already been zeroed by
         * vm_page_set_validclean().
         */
        for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
                if (i == (PAGE_SIZE / DEV_BSIZE) ||
                    (m->valid & ((vm_page_bits_t)1 << i))) {
                        if (i > b) {
                                pmap_zero_page_area(m,
                                    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
                        }
                        b = i + 1;
                }
        }

        /*
         * setvalid is TRUE when we can safely set the zeroed areas
         * as being valid.  We can do this if there are no cache consistency
         * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
         */
        if (setvalid)
                m->valid = VM_PAGE_BITS_ALL;
}
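
/*
 * Worked example for vm_page_zero_invalid() above, assuming a 4K page
 * and DEV_BSIZE == 512: for a page backing a 1300-byte file, chunks
 * 0-2 are valid, so the scan finds the invalid run at chunks 3-7 and
 * zeroes bytes 1536 through 4095 in a single pmap_zero_page_area()
 * call.
 */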

/*
 * vm_page_is_valid:
 *
 *	Is (partial) page valid?  Note that the case where size == 0
 *	will return FALSE in the degenerate case where the page is
 *	entirely invalid, and TRUE otherwise.
 *
 *	May not block.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
        vm_page_bits_t bits;

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        bits = vm_page_bits(base, size);
        if (m->valid && ((m->valid & bits) == bits))
                return 1;
        else
                return 0;
}

/*
 * Update dirty bits from pmap/mmu.  May not block.
 */
void
vm_page_test_dirty(vm_page_t m)
{

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
                vm_page_dirty(m);
}

void
vm_page_lock_KBI(vm_page_t m, const char *file, int line)
{

        mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
}

void
vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
{

        mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
}

int
vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
{

        return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
}

#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void
vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
{

        mtx_assert_(vm_page_lockptr(m), a, file, line);
}
#endif

int so_zerocp_fullpage = 0;

/*
 * Replace the given page with a copy.  The copied page assumes
 * the portion of the given page's "wire_count" that is not the
 * responsibility of this copy-on-write mechanism.
 *
 * The object containing the given page must have a non-zero
 * paging-in-progress count and be locked.
 */
void
vm_page_cowfault(vm_page_t m)
{
        vm_page_t mnew;
        vm_object_t object;
        vm_pindex_t pindex;

        mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);
        vm_page_lock_assert(m, MA_OWNED);
        object = m->object;
        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        KASSERT(object->paging_in_progress != 0,
            ("vm_page_cowfault: object %p's paging-in-progress count is zero.",
            object));
        pindex = m->pindex;

 retry_alloc:
        pmap_remove_all(m);
        vm_page_remove(m);
        mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
        if (mnew == NULL) {
                vm_page_insert(m, object, pindex);
                vm_page_unlock(m);
                VM_OBJECT_UNLOCK(object);
                VM_WAIT;
                VM_OBJECT_LOCK(object);
                if (m == vm_page_lookup(object, pindex)) {
                        vm_page_lock(m);
                        goto retry_alloc;
                } else {
                        /*
                         * Page disappeared during the wait.
                         */
                        return;
                }
        }

        if (m->cow == 0) {
                /*
                 * Check to see if we raced with an xmit complete when
                 * waiting to allocate a page.  If so, put things back
                 * the way they were.
                 */
                vm_page_unlock(m);
                vm_page_lock(mnew);
                vm_page_free(mnew);
                vm_page_unlock(mnew);
                vm_page_insert(m, object, pindex);
        } else { /* clear COW & copy page */
                if (!so_zerocp_fullpage)
                        pmap_copy_page(m, mnew);
                mnew->valid = VM_PAGE_BITS_ALL;
                vm_page_dirty(mnew);
                mnew->wire_count = m->wire_count - m->cow;
                m->wire_count = m->cow;
                vm_page_unlock(m);
        }
}

void
vm_page_cowclear(vm_page_t m)
{

        vm_page_lock_assert(m, MA_OWNED);
        if (m->cow) {
                m->cow--;
                /*
                 * Let vm_fault add back write permission lazily.
                 */
        }
        /*
         * sf_buf_free() will free the page, so we needn't do it here.
         */
}

int
vm_page_cowsetup(vm_page_t m)
{

        vm_page_lock_assert(m, MA_OWNED);
        if ((m->flags & PG_FICTITIOUS) != 0 ||
            (m->oflags & VPO_UNMANAGED) != 0 ||
            m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
                return (EBUSY);
        m->cow++;
        pmap_remove_write(m);
        VM_OBJECT_UNLOCK(m->object);
        return (0);
}
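
/*
 * Illustrative sketch, not compiled into the kernel: the zero-copy
 * socket send path is the intended client of the COW hooks above.  A
 * hypothetical sender marks the page copy-on-write before handing it
 * to the network stack, and the transmit-complete path drops the COW
 * reference.
 *
 *	vm_page_lock(m);
 *	error = vm_page_cowsetup(m);
 *	vm_page_unlock(m);
 *	...
 *	vm_page_lock(m);
 *	vm_page_cowclear(m);
 *	vm_page_unlock(m);
 */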

#ifdef INVARIANTS
void
vm_page_object_lock_assert(vm_page_t m)
{

        /*
         * Certain of the page's fields may only be modified by the
         * holder of the containing object's lock or the setter of the
         * page's VPO_BUSY flag.  Unfortunately, the setter of the
         * VPO_BUSY flag is not recorded, and thus cannot be checked
         * here.
         */
        if (m->object != NULL && (m->oflags & VPO_BUSY) == 0)
                VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
}
#endif

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
        db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
        db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
        db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
        db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
        db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
        db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
        db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
        db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
        db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
        db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{

        db_printf("PQ_FREE:");
        db_printf(" %d", cnt.v_free_count);
        db_printf("\n");

        db_printf("PQ_CACHE:");
        db_printf(" %d", cnt.v_cache_count);
        db_printf("\n");

        db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
            *vm_page_queues[PQ_ACTIVE].cnt,
            *vm_page_queues[PQ_INACTIVE].cnt);
}
#endif /* DDB */