vm_page.c revision 208524
1/*- 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1998 Matthew Dillon. All Rights Reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * The Mach Operating System project at Carnegie-Mellon University. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 * 33 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91 34 */ 35 36/*- 37 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 38 * All rights reserved. 39 * 40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 41 * 42 * Permission to use, copy, modify and distribute this software and 43 * its documentation is hereby granted, provided that both the copyright 44 * notice and this permission notice appear in all copies of the 45 * software, derivative works or modified versions, and any portions 46 * thereof, and that both notices appear in supporting documentation. 47 * 48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 51 * 52 * Carnegie Mellon requests users of this software to return to 53 * 54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 55 * School of Computer Science 56 * Carnegie Mellon University 57 * Pittsburgh PA 15213-3890 58 * 59 * any improvements or extensions that they make and grant Carnegie the 60 * rights to redistribute these changes. 61 */ 62 63/* 64 * GENERAL RULES ON VM_PAGE MANIPULATION 65 * 66 * - a pageq mutex is required when adding or removing a page from a 67 * page queue (vm_page_queue[]), regardless of other mutexes or the 68 * busy state of a page. 69 * 70 * - a hash chain mutex is required when associating or disassociating 71 * a page from the VM PAGE CACHE hash table (vm_page_buckets), 72 * regardless of other mutexes or the busy state of a page. 73 * 74 * - either a hash chain mutex OR a busied page is required in order 75 * to modify the page flags. 
A hash chain mutex must be obtained in 76 * order to busy a page. A page's flags cannot be modified by a 77 * hash chain mutex if the page is marked busy. 78 * 79 * - The object memq mutex is held when inserting or removing 80 * pages from an object (vm_page_insert() or vm_page_remove()). This 81 * is different from the object's main mutex. 82 * 83 * Generally speaking, you have to be aware of side effects when running 84 * vm_page ops. A vm_page_lookup() will return with the hash chain 85 * locked, whether it was able to lookup the page or not. vm_page_free(), 86 * vm_page_cache(), vm_page_activate(), and a number of other routines 87 * will release the hash chain mutex for you. Intermediate manipulation 88 * routines such as vm_page_flag_set() expect the hash chain to be held 89 * on entry and the hash chain will remain held on return. 90 * 91 * pageq scanning can only occur with the pageq in question locked. 92 * We have a known bottleneck with the active queue, but the cache 93 * and free queues are actually arrays already. 94 */ 95 96/* 97 * Resident memory management module. 98 */ 99 100#include <sys/cdefs.h> 101__FBSDID("$FreeBSD: head/sys/vm/vm_page.c 208524 2010-05-25 02:26:25Z alc $"); 102 103#include "opt_vm.h" 104 105#include <sys/param.h> 106#include <sys/systm.h> 107#include <sys/lock.h> 108#include <sys/kernel.h> 109#include <sys/limits.h> 110#include <sys/malloc.h> 111#include <sys/msgbuf.h> 112#include <sys/mutex.h> 113#include <sys/proc.h> 114#include <sys/sysctl.h> 115#include <sys/vmmeter.h> 116#include <sys/vnode.h> 117 118#include <vm/vm.h> 119#include <vm/pmap.h> 120#include <vm/vm_param.h> 121#include <vm/vm_kern.h> 122#include <vm/vm_object.h> 123#include <vm/vm_page.h> 124#include <vm/vm_pageout.h> 125#include <vm/vm_pager.h> 126#include <vm/vm_phys.h> 127#include <vm/vm_reserv.h> 128#include <vm/vm_extern.h> 129#include <vm/uma.h> 130#include <vm/uma_int.h> 131 132#include <machine/md_var.h> 133 134#if defined(__amd64__) || defined (__i386__) 135extern struct sysctl_oid_list sysctl__vm_pmap_children; 136#else 137SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); 138#endif 139 140static uint64_t pmap_tryrelock_calls; 141SYSCTL_QUAD(_vm_pmap, OID_AUTO, tryrelock_calls, CTLFLAG_RD, 142 &pmap_tryrelock_calls, 0, "Number of tryrelock calls"); 143 144static int pmap_tryrelock_restart; 145SYSCTL_INT(_vm_pmap, OID_AUTO, tryrelock_restart, CTLFLAG_RD, 146 &pmap_tryrelock_restart, 0, "Number of tryrelock restarts"); 147 148static int pmap_tryrelock_race; 149SYSCTL_INT(_vm_pmap, OID_AUTO, tryrelock_race, CTLFLAG_RD, 150 &pmap_tryrelock_race, 0, "Number of tryrelock pmap race cases"); 151 152/* 153 * Associated with page of user-allocatable memory is a 154 * page structure. 155 */ 156 157struct vpgqueues vm_page_queues[PQ_COUNT]; 158struct vpglocks vm_page_queue_lock; 159struct vpglocks vm_page_queue_free_lock; 160 161struct vpglocks pa_lock[PA_LOCK_COUNT] __aligned(CACHE_LINE_SIZE); 162 163vm_page_t vm_page_array = 0; 164int vm_page_array_size = 0; 165long first_page = 0; 166int vm_page_zero_count = 0; 167 168static int boot_pages = UMA_BOOT_PAGES; 169TUNABLE_INT("vm.boot_pages", &boot_pages); 170SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0, 171 "number of pages allocated for bootstrapping the VM system"); 172 173static void vm_page_queue_remove(int queue, vm_page_t m); 174static void vm_page_enqueue(int queue, vm_page_t m); 175 176/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. 
*/ 177#if PAGE_SIZE == 32768 178#ifdef CTASSERT 179CTASSERT(sizeof(u_long) >= 8); 180#endif 181#endif 182 183/* 184 * Try to acquire a physical address lock while a pmap is locked. If we 185 * fail to trylock we unlock and lock the pmap directly and cache the 186 * locked pa in *locked. The caller should then restart their loop in case 187 * the virtual to physical mapping has changed. 188 */ 189int 190vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked) 191{ 192 vm_paddr_t lockpa; 193 uint32_t gen_count; 194 195 gen_count = pmap->pm_gen_count; 196 atomic_add_long((volatile long *)&pmap_tryrelock_calls, 1); 197 lockpa = *locked; 198 *locked = pa; 199 if (lockpa) { 200 PA_LOCK_ASSERT(lockpa, MA_OWNED); 201 if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa)) 202 return (0); 203 PA_UNLOCK(lockpa); 204 } 205 if (PA_TRYLOCK(pa)) 206 return (0); 207 PMAP_UNLOCK(pmap); 208 atomic_add_int((volatile int *)&pmap_tryrelock_restart, 1); 209 PA_LOCK(pa); 210 PMAP_LOCK(pmap); 211 212 if (pmap->pm_gen_count != gen_count + 1) { 213 pmap->pm_retries++; 214 atomic_add_int((volatile int *)&pmap_tryrelock_race, 1); 215 return (EAGAIN); 216 } 217 return (0); 218} 219 220/* 221 * vm_set_page_size: 222 * 223 * Sets the page size, perhaps based upon the memory 224 * size. Must be called before any use of page-size 225 * dependent functions. 226 */ 227void 228vm_set_page_size(void) 229{ 230 if (cnt.v_page_size == 0) 231 cnt.v_page_size = PAGE_SIZE; 232 if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0) 233 panic("vm_set_page_size: page size not a power of two"); 234} 235 236/* 237 * vm_page_blacklist_lookup: 238 * 239 * See if a physical address in this page has been listed 240 * in the blacklist tunable. Entries in the tunable are 241 * separated by spaces or commas. If an invalid integer is 242 * encountered then the rest of the string is skipped. 243 */ 244static int 245vm_page_blacklist_lookup(char *list, vm_paddr_t pa) 246{ 247 vm_paddr_t bad; 248 char *cp, *pos; 249 250 for (pos = list; *pos != '\0'; pos = cp) { 251 bad = strtoq(pos, &cp, 0); 252 if (*cp != '\0') { 253 if (*cp == ' ' || *cp == ',') { 254 cp++; 255 if (cp == pos) 256 continue; 257 } else 258 break; 259 } 260 if (pa == trunc_page(bad)) 261 return (1); 262 } 263 return (0); 264} 265 266/* 267 * vm_page_startup: 268 * 269 * Initializes the resident memory module. 270 * 271 * Allocates memory for the page cells, and 272 * for the object/offset-to-page hash table headers. 273 * Each page cell is initialized and placed on the free list. 
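 *	The "vaddr" argument is the first available kernel virtual address;
 *	the routine advances it past each mapping created here (such as the
 *	UMA boot pages, the optional minidump bitmap, a guard page, and
 *	vm_page_array itself) and returns the updated value to the caller.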
274 */ 275vm_offset_t 276vm_page_startup(vm_offset_t vaddr) 277{ 278 vm_offset_t mapped; 279 vm_paddr_t page_range; 280 vm_paddr_t new_end; 281 int i; 282 vm_paddr_t pa; 283 int nblocks; 284 vm_paddr_t last_pa; 285 char *list; 286 287 /* the biggest memory array is the second group of pages */ 288 vm_paddr_t end; 289 vm_paddr_t biggestsize; 290 vm_paddr_t low_water, high_water; 291 int biggestone; 292 293 biggestsize = 0; 294 biggestone = 0; 295 nblocks = 0; 296 vaddr = round_page(vaddr); 297 298 for (i = 0; phys_avail[i + 1]; i += 2) { 299 phys_avail[i] = round_page(phys_avail[i]); 300 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]); 301 } 302 303 low_water = phys_avail[0]; 304 high_water = phys_avail[1]; 305 306 for (i = 0; phys_avail[i + 1]; i += 2) { 307 vm_paddr_t size = phys_avail[i + 1] - phys_avail[i]; 308 309 if (size > biggestsize) { 310 biggestone = i; 311 biggestsize = size; 312 } 313 if (phys_avail[i] < low_water) 314 low_water = phys_avail[i]; 315 if (phys_avail[i + 1] > high_water) 316 high_water = phys_avail[i + 1]; 317 ++nblocks; 318 } 319 320#ifdef XEN 321 low_water = 0; 322#endif 323 324 end = phys_avail[biggestone+1]; 325 326 /* 327 * Initialize the locks. 328 */ 329 mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF | 330 MTX_RECURSE); 331 mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL, 332 MTX_DEF); 333 334 /* Setup page locks. */ 335 for (i = 0; i < PA_LOCK_COUNT; i++) 336 mtx_init(&pa_lock[i].data, "page lock", NULL, 337 MTX_DEF | MTX_RECURSE | MTX_DUPOK); 338 339 /* 340 * Initialize the queue headers for the hold queue, the active queue, 341 * and the inactive queue. 342 */ 343 for (i = 0; i < PQ_COUNT; i++) 344 TAILQ_INIT(&vm_page_queues[i].pl); 345 vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count; 346 vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count; 347 vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count; 348 349 /* 350 * Allocate memory for use when boot strapping the kernel memory 351 * allocator. 352 */ 353 new_end = end - (boot_pages * UMA_SLAB_SIZE); 354 new_end = trunc_page(new_end); 355 mapped = pmap_map(&vaddr, new_end, end, 356 VM_PROT_READ | VM_PROT_WRITE); 357 bzero((void *)mapped, end - new_end); 358 uma_startup((void *)mapped, boot_pages); 359 360#if defined(__amd64__) || defined(__i386__) || defined(__arm__) 361 /* 362 * Allocate a bitmap to indicate that a random physical page 363 * needs to be included in a minidump. 364 * 365 * The amd64 port needs this to indicate which direct map pages 366 * need to be dumped, via calls to dump_add_page()/dump_drop_page(). 367 * 368 * However, i386 still needs this workspace internally within the 369 * minidump code. In theory, they are not needed on i386, but are 370 * included should the sf_buf code decide to use them. 371 */ 372 page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE; 373 vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY); 374 new_end -= vm_page_dump_size; 375 vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end, 376 new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE); 377 bzero((void *)vm_page_dump, vm_page_dump_size); 378#endif 379#ifdef __amd64__ 380 /* 381 * Request that the physical pages underlying the message buffer be 382 * included in a crash dump. Since the message buffer is accessed 383 * through the direct map, they are not automatically included. 
384 */ 385 pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr); 386 last_pa = pa + round_page(MSGBUF_SIZE); 387 while (pa < last_pa) { 388 dump_add_page(pa); 389 pa += PAGE_SIZE; 390 } 391#endif 392 /* 393 * Compute the number of pages of memory that will be available for 394 * use (taking into account the overhead of a page structure per 395 * page). 396 */ 397 first_page = low_water / PAGE_SIZE; 398#ifdef VM_PHYSSEG_SPARSE 399 page_range = 0; 400 for (i = 0; phys_avail[i + 1] != 0; i += 2) 401 page_range += atop(phys_avail[i + 1] - phys_avail[i]); 402#elif defined(VM_PHYSSEG_DENSE) 403 page_range = high_water / PAGE_SIZE - first_page; 404#else 405#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined." 406#endif 407 end = new_end; 408 409 /* 410 * Reserve an unmapped guard page to trap access to vm_page_array[-1]. 411 */ 412 vaddr += PAGE_SIZE; 413 414 /* 415 * Initialize the mem entry structures now, and put them in the free 416 * queue. 417 */ 418 new_end = trunc_page(end - page_range * sizeof(struct vm_page)); 419 mapped = pmap_map(&vaddr, new_end, end, 420 VM_PROT_READ | VM_PROT_WRITE); 421 vm_page_array = (vm_page_t) mapped; 422#if VM_NRESERVLEVEL > 0 423 /* 424 * Allocate memory for the reservation management system's data 425 * structures. 426 */ 427 new_end = vm_reserv_startup(&vaddr, new_end, high_water); 428#endif 429#ifdef __amd64__ 430 /* 431 * pmap_map on amd64 comes out of the direct-map, not kvm like i386, 432 * so the pages must be tracked for a crashdump to include this data. 433 * This includes the vm_page_array and the early UMA bootstrap pages. 434 */ 435 for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE) 436 dump_add_page(pa); 437#endif 438 phys_avail[biggestone + 1] = new_end; 439 440 /* 441 * Clear all of the page structures 442 */ 443 bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page)); 444 for (i = 0; i < page_range; i++) 445 vm_page_array[i].order = VM_NFREEORDER; 446 vm_page_array_size = page_range; 447 448 /* 449 * Initialize the physical memory allocator. 450 */ 451 vm_phys_init(); 452 453 /* 454 * Add every available physical page that is not blacklisted to 455 * the free lists. 456 */ 457 cnt.v_page_count = 0; 458 cnt.v_free_count = 0; 459 list = getenv("vm.blacklist"); 460 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 461 pa = phys_avail[i]; 462 last_pa = phys_avail[i + 1]; 463 while (pa < last_pa) { 464 if (list != NULL && 465 vm_page_blacklist_lookup(list, pa)) 466 printf("Skipping page with pa 0x%jx\n", 467 (uintmax_t)pa); 468 else 469 vm_phys_add_page(pa); 470 pa += PAGE_SIZE; 471 } 472 } 473 freeenv(list); 474#if VM_NRESERVLEVEL > 0 475 /* 476 * Initialize the reservation management system. 477 */ 478 vm_reserv_init(); 479#endif 480 return (vaddr); 481} 482 483void 484vm_page_flag_set(vm_page_t m, unsigned short bits) 485{ 486 487 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 488 m->flags |= bits; 489} 490 491void 492vm_page_flag_clear(vm_page_t m, unsigned short bits) 493{ 494 495 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 496 m->flags &= ~bits; 497} 498 499void 500vm_page_busy(vm_page_t m) 501{ 502 503 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 504 KASSERT((m->oflags & VPO_BUSY) == 0, 505 ("vm_page_busy: page already busy!!!")); 506 m->oflags |= VPO_BUSY; 507} 508 509/* 510 * vm_page_flash: 511 * 512 * wakeup anyone waiting for the page. 
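 *	A hedged sketch of the busy/wakeup handshake, using only routines
 *	from this file (the wmesg string and the surrounding caller are
 *	illustrative assumptions, not a verbatim consumer):
 *
 *		VM_OBJECT_LOCK(object);
 *		while ((m->oflags & VPO_BUSY) != 0)
 *			vm_page_sleep(m, "pgbusy");	sets VPO_WANTED and sleeps
 *		vm_page_busy(m);			gain exclusive busy
 *		... operate on the page ...
 *		vm_page_wakeup(m);			clears VPO_BUSY, calls vm_page_flash()
 *		VM_OBJECT_UNLOCK(object);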
513 */ 514void 515vm_page_flash(vm_page_t m) 516{ 517 518 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 519 if (m->oflags & VPO_WANTED) { 520 m->oflags &= ~VPO_WANTED; 521 wakeup(m); 522 } 523} 524 525/* 526 * vm_page_wakeup: 527 * 528 * clear the VPO_BUSY flag and wakeup anyone waiting for the 529 * page. 530 * 531 */ 532void 533vm_page_wakeup(vm_page_t m) 534{ 535 536 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 537 KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!")); 538 m->oflags &= ~VPO_BUSY; 539 vm_page_flash(m); 540} 541 542void 543vm_page_io_start(vm_page_t m) 544{ 545 546 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 547 m->busy++; 548} 549 550void 551vm_page_io_finish(vm_page_t m) 552{ 553 554 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 555 m->busy--; 556 if (m->busy == 0) 557 vm_page_flash(m); 558} 559 560/* 561 * Keep page from being freed by the page daemon 562 * much of the same effect as wiring, except much lower 563 * overhead and should be used only for *very* temporary 564 * holding ("wiring"). 565 */ 566void 567vm_page_hold(vm_page_t mem) 568{ 569 570 vm_page_lock_assert(mem, MA_OWNED); 571 mem->hold_count++; 572} 573 574void 575vm_page_unhold(vm_page_t mem) 576{ 577 578 vm_page_lock_assert(mem, MA_OWNED); 579 --mem->hold_count; 580 KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!")); 581 if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD)) 582 vm_page_free_toq(mem); 583} 584 585/* 586 * vm_page_free: 587 * 588 * Free a page. 589 */ 590void 591vm_page_free(vm_page_t m) 592{ 593 594 m->flags &= ~PG_ZERO; 595 vm_page_free_toq(m); 596} 597 598/* 599 * vm_page_free_zero: 600 * 601 * Free a page to the zerod-pages queue 602 */ 603void 604vm_page_free_zero(vm_page_t m) 605{ 606 607 m->flags |= PG_ZERO; 608 vm_page_free_toq(m); 609} 610 611/* 612 * vm_page_sleep: 613 * 614 * Sleep and release the page and page queues locks. 615 * 616 * The object containing the given page must be locked. 617 */ 618void 619vm_page_sleep(vm_page_t m, const char *msg) 620{ 621 622 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 623 if (mtx_owned(&vm_page_queue_mtx)) 624 vm_page_unlock_queues(); 625 if (mtx_owned(vm_page_lockptr(m))) 626 vm_page_unlock(m); 627 628 /* 629 * It's possible that while we sleep, the page will get 630 * unbusied and freed. If we are holding the object 631 * lock, we will assume we hold a reference to the object 632 * such that even if m->object changes, we can re-lock 633 * it. 634 */ 635 m->oflags |= VPO_WANTED; 636 msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0); 637} 638 639/* 640 * vm_page_dirty: 641 * 642 * make page all dirty 643 */ 644void 645vm_page_dirty(vm_page_t m) 646{ 647 648 KASSERT((m->flags & PG_CACHED) == 0, 649 ("vm_page_dirty: page in cache!")); 650 KASSERT(!VM_PAGE_IS_FREE(m), 651 ("vm_page_dirty: page is free!")); 652 KASSERT(m->valid == VM_PAGE_BITS_ALL, 653 ("vm_page_dirty: page is invalid!")); 654 m->dirty = VM_PAGE_BITS_ALL; 655} 656 657/* 658 * vm_page_splay: 659 * 660 * Implements Sleator and Tarjan's top-down splay algorithm. Returns 661 * the vm_page containing the given pindex. If, however, that 662 * pindex is not found in the vm_object, returns a vm_page that is 663 * adjacent to the pindex, coming before or after it. 
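 *	For example, vm_page_lookup() below relies on this behavior: it
 *	splays the object's tree at the wanted pindex and then checks the
 *	new root (sketch, assuming the tree is non-empty):
 *
 *		root = vm_page_splay(pindex, object->root);
 *		object->root = root;
 *		if (root->pindex != pindex)
 *			root is merely a neighbor; the page is not resident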
664 */ 665vm_page_t 666vm_page_splay(vm_pindex_t pindex, vm_page_t root) 667{ 668 struct vm_page dummy; 669 vm_page_t lefttreemax, righttreemin, y; 670 671 if (root == NULL) 672 return (root); 673 lefttreemax = righttreemin = &dummy; 674 for (;; root = y) { 675 if (pindex < root->pindex) { 676 if ((y = root->left) == NULL) 677 break; 678 if (pindex < y->pindex) { 679 /* Rotate right. */ 680 root->left = y->right; 681 y->right = root; 682 root = y; 683 if ((y = root->left) == NULL) 684 break; 685 } 686 /* Link into the new root's right tree. */ 687 righttreemin->left = root; 688 righttreemin = root; 689 } else if (pindex > root->pindex) { 690 if ((y = root->right) == NULL) 691 break; 692 if (pindex > y->pindex) { 693 /* Rotate left. */ 694 root->right = y->left; 695 y->left = root; 696 root = y; 697 if ((y = root->right) == NULL) 698 break; 699 } 700 /* Link into the new root's left tree. */ 701 lefttreemax->right = root; 702 lefttreemax = root; 703 } else 704 break; 705 } 706 /* Assemble the new root. */ 707 lefttreemax->right = root->left; 708 righttreemin->left = root->right; 709 root->left = dummy.right; 710 root->right = dummy.left; 711 return (root); 712} 713 714/* 715 * vm_page_insert: [ internal use only ] 716 * 717 * Inserts the given mem entry into the object and object list. 718 * 719 * The pagetables are not updated but will presumably fault the page 720 * in if necessary, or if a kernel page the caller will at some point 721 * enter the page into the kernel's pmap. We are not allowed to block 722 * here so we *can't* do this anyway. 723 * 724 * The object and page must be locked. 725 * This routine may not block. 726 */ 727void 728vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex) 729{ 730 vm_page_t root; 731 732 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 733 if (m->object != NULL) 734 panic("vm_page_insert: page already inserted"); 735 736 /* 737 * Record the object/offset pair in this page 738 */ 739 m->object = object; 740 m->pindex = pindex; 741 742 /* 743 * Now link into the object's ordered list of backed pages. 744 */ 745 root = object->root; 746 if (root == NULL) { 747 m->left = NULL; 748 m->right = NULL; 749 TAILQ_INSERT_TAIL(&object->memq, m, listq); 750 } else { 751 root = vm_page_splay(pindex, root); 752 if (pindex < root->pindex) { 753 m->left = root->left; 754 m->right = root; 755 root->left = NULL; 756 TAILQ_INSERT_BEFORE(root, m, listq); 757 } else if (pindex == root->pindex) 758 panic("vm_page_insert: offset already allocated"); 759 else { 760 m->right = root->right; 761 m->left = root; 762 root->right = NULL; 763 TAILQ_INSERT_AFTER(&object->memq, root, m, listq); 764 } 765 } 766 object->root = m; 767 object->generation++; 768 769 /* 770 * show that the object has one more resident page. 771 */ 772 object->resident_page_count++; 773 /* 774 * Hold the vnode until the last page is released. 775 */ 776 if (object->resident_page_count == 1 && object->type == OBJT_VNODE) 777 vhold((struct vnode *)object->handle); 778 779 /* 780 * Since we are inserting a new and possibly dirty page, 781 * update the object's OBJ_MIGHTBEDIRTY flag. 782 */ 783 if (m->flags & PG_WRITEABLE) 784 vm_object_set_writeable_dirty(object); 785} 786 787/* 788 * vm_page_remove: 789 * NOTE: used by device pager as well -wfj 790 * 791 * Removes the given mem entry from the object/offset-page 792 * table and the object page list, but do not invalidate/terminate 793 * the backing store. 794 * 795 * The object and page must be locked. 
796 * The underlying pmap entry (if any) is NOT removed here. 797 * This routine may not block. 798 */ 799void 800vm_page_remove(vm_page_t m) 801{ 802 vm_object_t object; 803 vm_page_t root; 804 805 if ((m->flags & PG_UNMANAGED) == 0) 806 vm_page_lock_assert(m, MA_OWNED); 807 if ((object = m->object) == NULL) 808 return; 809 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 810 if (m->oflags & VPO_BUSY) { 811 m->oflags &= ~VPO_BUSY; 812 vm_page_flash(m); 813 } 814 815 /* 816 * Now remove from the object's list of backed pages. 817 */ 818 if (m != object->root) 819 vm_page_splay(m->pindex, object->root); 820 if (m->left == NULL) 821 root = m->right; 822 else { 823 root = vm_page_splay(m->pindex, m->left); 824 root->right = m->right; 825 } 826 object->root = root; 827 TAILQ_REMOVE(&object->memq, m, listq); 828 829 /* 830 * And show that the object has one fewer resident page. 831 */ 832 object->resident_page_count--; 833 object->generation++; 834 /* 835 * The vnode may now be recycled. 836 */ 837 if (object->resident_page_count == 0 && object->type == OBJT_VNODE) 838 vdrop((struct vnode *)object->handle); 839 840 m->object = NULL; 841} 842 843/* 844 * vm_page_lookup: 845 * 846 * Returns the page associated with the object/offset 847 * pair specified; if none is found, NULL is returned. 848 * 849 * The object must be locked. 850 * This routine may not block. 851 * This is a critical path routine 852 */ 853vm_page_t 854vm_page_lookup(vm_object_t object, vm_pindex_t pindex) 855{ 856 vm_page_t m; 857 858 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 859 if ((m = object->root) != NULL && m->pindex != pindex) { 860 m = vm_page_splay(pindex, m); 861 if ((object->root = m)->pindex != pindex) 862 m = NULL; 863 } 864 return (m); 865} 866 867/* 868 * vm_page_rename: 869 * 870 * Move the given memory entry from its 871 * current object to the specified target object/offset. 872 * 873 * The object must be locked. 874 * This routine may not block. 875 * 876 * Note: swap associated with the page must be invalidated by the move. We 877 * have to do this for several reasons: (1) we aren't freeing the 878 * page, (2) we are dirtying the page, (3) the VM system is probably 879 * moving the page from object A to B, and will then later move 880 * the backing store from A to B and we can't have a conflict. 881 * 882 * Note: we *always* dirty the page. It is necessary both for the 883 * fact that we moved it, and because we may be invalidating 884 * swap. If the page is on the cache, we have to deactivate it 885 * or vm_page_dirty() will panic. Dirty pages are not allowed 886 * on the cache. 887 */ 888void 889vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex) 890{ 891 892 vm_page_remove(m); 893 vm_page_insert(m, new_object, new_pindex); 894 vm_page_dirty(m); 895} 896 897/* 898 * Convert all of the given object's cached pages that have a 899 * pindex within the given range into free pages. If the value 900 * zero is given for "end", then the range's upper bound is 901 * infinity. If the given object is backed by a vnode and it 902 * transitions from having one or more cached pages to none, the 903 * vnode's hold count is reduced. 
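 *	For instance, passing zero for both "start" and "end" empties the
 *	object's cache entirely; a hedged sketch of such a caller (object
 *	teardown is the assumed context):
 *
 *		if (__predict_false(object->cache != NULL))
 *			vm_page_cache_free(object, 0, 0);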
904 */ 905void 906vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 907{ 908 vm_page_t m, m_next; 909 boolean_t empty; 910 911 mtx_lock(&vm_page_queue_free_mtx); 912 if (__predict_false(object->cache == NULL)) { 913 mtx_unlock(&vm_page_queue_free_mtx); 914 return; 915 } 916 m = object->cache = vm_page_splay(start, object->cache); 917 if (m->pindex < start) { 918 if (m->right == NULL) 919 m = NULL; 920 else { 921 m_next = vm_page_splay(start, m->right); 922 m_next->left = m; 923 m->right = NULL; 924 m = object->cache = m_next; 925 } 926 } 927 928 /* 929 * At this point, "m" is either (1) a reference to the page 930 * with the least pindex that is greater than or equal to 931 * "start" or (2) NULL. 932 */ 933 for (; m != NULL && (m->pindex < end || end == 0); m = m_next) { 934 /* 935 * Find "m"'s successor and remove "m" from the 936 * object's cache. 937 */ 938 if (m->right == NULL) { 939 object->cache = m->left; 940 m_next = NULL; 941 } else { 942 m_next = vm_page_splay(start, m->right); 943 m_next->left = m->left; 944 object->cache = m_next; 945 } 946 /* Convert "m" to a free page. */ 947 m->object = NULL; 948 m->valid = 0; 949 /* Clear PG_CACHED and set PG_FREE. */ 950 m->flags ^= PG_CACHED | PG_FREE; 951 KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE, 952 ("vm_page_cache_free: page %p has inconsistent flags", m)); 953 cnt.v_cache_count--; 954 cnt.v_free_count++; 955 } 956 empty = object->cache == NULL; 957 mtx_unlock(&vm_page_queue_free_mtx); 958 if (object->type == OBJT_VNODE && empty) 959 vdrop(object->handle); 960} 961 962/* 963 * Returns the cached page that is associated with the given 964 * object and offset. If, however, none exists, returns NULL. 965 * 966 * The free page queue must be locked. 967 */ 968static inline vm_page_t 969vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex) 970{ 971 vm_page_t m; 972 973 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 974 if ((m = object->cache) != NULL && m->pindex != pindex) { 975 m = vm_page_splay(pindex, m); 976 if ((object->cache = m)->pindex != pindex) 977 m = NULL; 978 } 979 return (m); 980} 981 982/* 983 * Remove the given cached page from its containing object's 984 * collection of cached pages. 985 * 986 * The free page queue must be locked. 987 */ 988void 989vm_page_cache_remove(vm_page_t m) 990{ 991 vm_object_t object; 992 vm_page_t root; 993 994 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 995 KASSERT((m->flags & PG_CACHED) != 0, 996 ("vm_page_cache_remove: page %p is not cached", m)); 997 object = m->object; 998 if (m != object->cache) { 999 root = vm_page_splay(m->pindex, object->cache); 1000 KASSERT(root == m, 1001 ("vm_page_cache_remove: page %p is not cached in object %p", 1002 m, object)); 1003 } 1004 if (m->left == NULL) 1005 root = m->right; 1006 else if (m->right == NULL) 1007 root = m->left; 1008 else { 1009 root = vm_page_splay(m->pindex, m->left); 1010 root->right = m->right; 1011 } 1012 object->cache = root; 1013 m->object = NULL; 1014 cnt.v_cache_count--; 1015} 1016 1017/* 1018 * Transfer all of the cached pages with offset greater than or 1019 * equal to 'offidxstart' from the original object's cache to the 1020 * new object's cache. However, any cached pages with offset 1021 * greater than or equal to the new object's size are kept in the 1022 * original object. Initially, the new object's cache must be 1023 * empty. Offset 'offidxstart' in the original object must 1024 * correspond to offset zero in the new object. 1025 * 1026 * The new object must be locked. 
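 *	A hedged sketch of the intended use, moving a range of cached pages
 *	from a locked "orig_object" into a freshly created, locked
 *	"new_object" whose offset zero corresponds to "offidxstart" in the
 *	original:
 *
 *		vm_page_cache_transfer(orig_object, offidxstart, new_object);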
1027 */ 1028void 1029vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart, 1030 vm_object_t new_object) 1031{ 1032 vm_page_t m, m_next; 1033 1034 /* 1035 * Insertion into an object's collection of cached pages 1036 * requires the object to be locked. In contrast, removal does 1037 * not. 1038 */ 1039 VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED); 1040 KASSERT(new_object->cache == NULL, 1041 ("vm_page_cache_transfer: object %p has cached pages", 1042 new_object)); 1043 mtx_lock(&vm_page_queue_free_mtx); 1044 if ((m = orig_object->cache) != NULL) { 1045 /* 1046 * Transfer all of the pages with offset greater than or 1047 * equal to 'offidxstart' from the original object's 1048 * cache to the new object's cache. 1049 */ 1050 m = vm_page_splay(offidxstart, m); 1051 if (m->pindex < offidxstart) { 1052 orig_object->cache = m; 1053 new_object->cache = m->right; 1054 m->right = NULL; 1055 } else { 1056 orig_object->cache = m->left; 1057 new_object->cache = m; 1058 m->left = NULL; 1059 } 1060 while ((m = new_object->cache) != NULL) { 1061 if ((m->pindex - offidxstart) >= new_object->size) { 1062 /* 1063 * Return all of the cached pages with 1064 * offset greater than or equal to the 1065 * new object's size to the original 1066 * object's cache. 1067 */ 1068 new_object->cache = m->left; 1069 m->left = orig_object->cache; 1070 orig_object->cache = m; 1071 break; 1072 } 1073 m_next = vm_page_splay(m->pindex, m->right); 1074 /* Update the page's object and offset. */ 1075 m->object = new_object; 1076 m->pindex -= offidxstart; 1077 if (m_next == NULL) 1078 break; 1079 m->right = NULL; 1080 m_next->left = m; 1081 new_object->cache = m_next; 1082 } 1083 KASSERT(new_object->cache == NULL || 1084 new_object->type == OBJT_SWAP, 1085 ("vm_page_cache_transfer: object %p's type is incompatible" 1086 " with cached pages", new_object)); 1087 } 1088 mtx_unlock(&vm_page_queue_free_mtx); 1089} 1090 1091/* 1092 * vm_page_alloc: 1093 * 1094 * Allocate and return a memory cell associated 1095 * with this VM object/offset pair. 1096 * 1097 * page_req classes: 1098 * VM_ALLOC_NORMAL normal process request 1099 * VM_ALLOC_SYSTEM system *really* needs a page 1100 * VM_ALLOC_INTERRUPT interrupt time request 1101 * VM_ALLOC_ZERO zero page 1102 * VM_ALLOC_WIRED wire the allocated page 1103 * VM_ALLOC_NOOBJ page is not associated with a vm object 1104 * VM_ALLOC_NOBUSY do not set the page busy 1105 * VM_ALLOC_IFNOTCACHED return NULL, do not reactivate if the page 1106 * is cached 1107 * 1108 * This routine may not sleep. 1109 */ 1110vm_page_t 1111vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) 1112{ 1113 struct vnode *vp = NULL; 1114 vm_object_t m_object; 1115 vm_page_t m; 1116 int flags, page_req; 1117 1118 page_req = req & VM_ALLOC_CLASS_MASK; 1119 KASSERT(curthread->td_intr_nesting_level == 0 || 1120 page_req == VM_ALLOC_INTERRUPT, 1121 ("vm_page_alloc(NORMAL|SYSTEM) in interrupt context")); 1122 1123 if ((req & VM_ALLOC_NOOBJ) == 0) { 1124 KASSERT(object != NULL, 1125 ("vm_page_alloc: NULL object.")); 1126 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 1127 } 1128 1129 /* 1130 * The pager is allowed to eat deeper into the free page list. 
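 *	When the page daemon itself is allocating, the request below is
 *	promoted to VM_ALLOC_SYSTEM.  The test that follows then admits a
 *	request whenever the free plus cache page count is above the
 *	watermark for its class: v_free_reserved for normal requests,
 *	v_interrupt_free_min for system requests, and anything above zero
 *	for interrupt-time requests.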
1131 */ 1132 if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) { 1133 page_req = VM_ALLOC_SYSTEM; 1134 }; 1135 1136 mtx_lock(&vm_page_queue_free_mtx); 1137 if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved || 1138 (page_req == VM_ALLOC_SYSTEM && 1139 cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) || 1140 (page_req == VM_ALLOC_INTERRUPT && 1141 cnt.v_free_count + cnt.v_cache_count > 0)) { 1142 /* 1143 * Allocate from the free queue if the number of free pages 1144 * exceeds the minimum for the request class. 1145 */ 1146 if (object != NULL && 1147 (m = vm_page_cache_lookup(object, pindex)) != NULL) { 1148 if ((req & VM_ALLOC_IFNOTCACHED) != 0) { 1149 mtx_unlock(&vm_page_queue_free_mtx); 1150 return (NULL); 1151 } 1152 if (vm_phys_unfree_page(m)) 1153 vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0); 1154#if VM_NRESERVLEVEL > 0 1155 else if (!vm_reserv_reactivate_page(m)) 1156#else 1157 else 1158#endif 1159 panic("vm_page_alloc: cache page %p is missing" 1160 " from the free queue", m); 1161 } else if ((req & VM_ALLOC_IFCACHED) != 0) { 1162 mtx_unlock(&vm_page_queue_free_mtx); 1163 return (NULL); 1164#if VM_NRESERVLEVEL > 0 1165 } else if (object == NULL || object->type == OBJT_DEVICE || 1166 object->type == OBJT_SG || 1167 (object->flags & OBJ_COLORED) == 0 || 1168 (m = vm_reserv_alloc_page(object, pindex)) == NULL) { 1169#else 1170 } else { 1171#endif 1172 m = vm_phys_alloc_pages(object != NULL ? 1173 VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0); 1174#if VM_NRESERVLEVEL > 0 1175 if (m == NULL && vm_reserv_reclaim_inactive()) { 1176 m = vm_phys_alloc_pages(object != NULL ? 1177 VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 1178 0); 1179 } 1180#endif 1181 } 1182 } else { 1183 /* 1184 * Not allocatable, give up. 1185 */ 1186 mtx_unlock(&vm_page_queue_free_mtx); 1187 atomic_add_int(&vm_pageout_deficit, 1); 1188 pagedaemon_wakeup(); 1189 return (NULL); 1190 } 1191 1192 /* 1193 * At this point we had better have found a good page. 1194 */ 1195 1196 KASSERT(m != NULL, ("vm_page_alloc: missing page")); 1197 KASSERT(m->queue == PQ_NONE, 1198 ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue)); 1199 KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m)); 1200 KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m)); 1201 KASSERT(m->busy == 0, ("vm_page_alloc: page %p is busy", m)); 1202 KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m)); 1203 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, 1204 ("vm_page_alloc: page %p has unexpected memattr %d", m, 1205 pmap_page_get_memattr(m))); 1206 if ((m->flags & PG_CACHED) != 0) { 1207 KASSERT(m->valid != 0, 1208 ("vm_page_alloc: cached page %p is invalid", m)); 1209 if (m->object == object && m->pindex == pindex) 1210 cnt.v_reactivated++; 1211 else 1212 m->valid = 0; 1213 m_object = m->object; 1214 vm_page_cache_remove(m); 1215 if (m_object->type == OBJT_VNODE && m_object->cache == NULL) 1216 vp = m_object->handle; 1217 } else { 1218 KASSERT(VM_PAGE_IS_FREE(m), 1219 ("vm_page_alloc: page %p is not free", m)); 1220 KASSERT(m->valid == 0, 1221 ("vm_page_alloc: free page %p is valid", m)); 1222 cnt.v_free_count--; 1223 } 1224 1225 /* 1226 * Initialize structure. Only the PG_ZERO flag is inherited. 
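 *	Concretely: PG_ZERO survives only if VM_ALLOC_ZERO was requested,
 *	PG_UNMANAGED is set for pages without an object (or with an
 *	OBJT_PHYS object), VPO_BUSY is set unless VM_ALLOC_NOBUSY or
 *	VM_ALLOC_NOOBJ was given, and VM_ALLOC_WIRED starts the page with
 *	a wire count of one.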
1227 */ 1228 flags = 0; 1229 if (m->flags & PG_ZERO) { 1230 vm_page_zero_count--; 1231 if (req & VM_ALLOC_ZERO) 1232 flags = PG_ZERO; 1233 } 1234 if (object == NULL || object->type == OBJT_PHYS) 1235 flags |= PG_UNMANAGED; 1236 m->flags = flags; 1237 if (req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) 1238 m->oflags = 0; 1239 else 1240 m->oflags = VPO_BUSY; 1241 if (req & VM_ALLOC_WIRED) { 1242 atomic_add_int(&cnt.v_wire_count, 1); 1243 m->wire_count = 1; 1244 } 1245 m->act_count = 0; 1246 mtx_unlock(&vm_page_queue_free_mtx); 1247 1248 if (object != NULL) { 1249 /* Ignore device objects; the pager sets "memattr" for them. */ 1250 if (object->memattr != VM_MEMATTR_DEFAULT && 1251 object->type != OBJT_DEVICE && object->type != OBJT_SG) 1252 pmap_page_set_memattr(m, object->memattr); 1253 vm_page_insert(m, object, pindex); 1254 } else 1255 m->pindex = pindex; 1256 1257 /* 1258 * The following call to vdrop() must come after the above call 1259 * to vm_page_insert() in case both affect the same object and 1260 * vnode. Otherwise, the affected vnode's hold count could 1261 * temporarily become zero. 1262 */ 1263 if (vp != NULL) 1264 vdrop(vp); 1265 1266 /* 1267 * Don't wakeup too often - wakeup the pageout daemon when 1268 * we would be nearly out of memory. 1269 */ 1270 if (vm_paging_needed()) 1271 pagedaemon_wakeup(); 1272 1273 return (m); 1274} 1275 1276/* 1277 * vm_wait: (also see VM_WAIT macro) 1278 * 1279 * Block until free pages are available for allocation 1280 * - Called in various places before memory allocations. 1281 */ 1282void 1283vm_wait(void) 1284{ 1285 1286 mtx_lock(&vm_page_queue_free_mtx); 1287 if (curproc == pageproc) { 1288 vm_pageout_pages_needed = 1; 1289 msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx, 1290 PDROP | PSWP, "VMWait", 0); 1291 } else { 1292 if (!vm_pages_needed) { 1293 vm_pages_needed = 1; 1294 wakeup(&vm_pages_needed); 1295 } 1296 msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM, 1297 "vmwait", 0); 1298 } 1299} 1300 1301/* 1302 * vm_waitpfault: (also see VM_WAITPFAULT macro) 1303 * 1304 * Block until free pages are available for allocation 1305 * - Called only in vm_fault so that processes page faulting 1306 * can be easily tracked. 1307 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing 1308 * processes will be able to grab memory first. Do not change 1309 * this balance without careful testing first. 1310 */ 1311void 1312vm_waitpfault(void) 1313{ 1314 1315 mtx_lock(&vm_page_queue_free_mtx); 1316 if (!vm_pages_needed) { 1317 vm_pages_needed = 1; 1318 wakeup(&vm_pages_needed); 1319 } 1320 msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER, 1321 "pfault", 0); 1322} 1323 1324/* 1325 * vm_page_requeue: 1326 * 1327 * If the given page is contained within a page queue, move it to the tail 1328 * of that queue. 1329 * 1330 * The page queues must be locked. 1331 */ 1332void 1333vm_page_requeue(vm_page_t m) 1334{ 1335 int queue = VM_PAGE_GETQUEUE(m); 1336 struct vpgqueues *vpq; 1337 1338 if (queue != PQ_NONE) { 1339 vpq = &vm_page_queues[queue]; 1340 TAILQ_REMOVE(&vpq->pl, m, pageq); 1341 TAILQ_INSERT_TAIL(&vpq->pl, m, pageq); 1342 } 1343} 1344 1345/* 1346 * vm_page_queue_remove: 1347 * 1348 * Remove the given page from the specified queue. 1349 * 1350 * The page and page queues must be locked. 
1351 */ 1352static __inline void 1353vm_page_queue_remove(int queue, vm_page_t m) 1354{ 1355 struct vpgqueues *pq; 1356 1357 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1358 vm_page_lock_assert(m, MA_OWNED); 1359 pq = &vm_page_queues[queue]; 1360 TAILQ_REMOVE(&pq->pl, m, pageq); 1361 (*pq->cnt)--; 1362} 1363 1364/* 1365 * vm_pageq_remove: 1366 * 1367 * Remove a page from its queue. 1368 * 1369 * The given page must be locked. 1370 * This routine may not block. 1371 */ 1372void 1373vm_pageq_remove(vm_page_t m) 1374{ 1375 int queue = VM_PAGE_GETQUEUE(m); 1376 1377 vm_page_lock_assert(m, MA_OWNED); 1378 if (queue != PQ_NONE) { 1379 vm_page_lock_queues(); 1380 VM_PAGE_SETQUEUE2(m, PQ_NONE); 1381 vm_page_queue_remove(queue, m); 1382 vm_page_unlock_queues(); 1383 } 1384} 1385 1386/* 1387 * vm_page_enqueue: 1388 * 1389 * Add the given page to the specified queue. 1390 * 1391 * The page queues must be locked. 1392 */ 1393static void 1394vm_page_enqueue(int queue, vm_page_t m) 1395{ 1396 struct vpgqueues *vpq; 1397 1398 vpq = &vm_page_queues[queue]; 1399 VM_PAGE_SETQUEUE2(m, queue); 1400 TAILQ_INSERT_TAIL(&vpq->pl, m, pageq); 1401 ++*vpq->cnt; 1402} 1403 1404/* 1405 * vm_page_activate: 1406 * 1407 * Put the specified page on the active list (if appropriate). 1408 * Ensure that act_count is at least ACT_INIT but do not otherwise 1409 * mess with it. 1410 * 1411 * The page must be locked. 1412 * This routine may not block. 1413 */ 1414void 1415vm_page_activate(vm_page_t m) 1416{ 1417 int queue; 1418 1419 vm_page_lock_assert(m, MA_OWNED); 1420 if ((queue = VM_PAGE_GETKNOWNQUEUE2(m)) != PQ_ACTIVE) { 1421 if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) { 1422 if (m->act_count < ACT_INIT) 1423 m->act_count = ACT_INIT; 1424 vm_page_lock_queues(); 1425 if (queue != PQ_NONE) 1426 vm_page_queue_remove(queue, m); 1427 vm_page_enqueue(PQ_ACTIVE, m); 1428 vm_page_unlock_queues(); 1429 } else 1430 KASSERT(queue == PQ_NONE, 1431 ("vm_page_activate: wired page %p is queued", m)); 1432 } else { 1433 if (m->act_count < ACT_INIT) 1434 m->act_count = ACT_INIT; 1435 } 1436} 1437 1438/* 1439 * vm_page_free_wakeup: 1440 * 1441 * Helper routine for vm_page_free_toq() and vm_page_cache(). This 1442 * routine is called when a page has been added to the cache or free 1443 * queues. 1444 * 1445 * The page queues must be locked. 1446 * This routine may not block. 1447 */ 1448static inline void 1449vm_page_free_wakeup(void) 1450{ 1451 1452 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 1453 /* 1454 * if pageout daemon needs pages, then tell it that there are 1455 * some free. 1456 */ 1457 if (vm_pageout_pages_needed && 1458 cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) { 1459 wakeup(&vm_pageout_pages_needed); 1460 vm_pageout_pages_needed = 0; 1461 } 1462 /* 1463 * wakeup processes that are waiting on memory if we hit a 1464 * high water mark. And wakeup scheduler process if we have 1465 * lots of memory. this process will swapin processes. 1466 */ 1467 if (vm_pages_needed && !vm_page_count_min()) { 1468 vm_pages_needed = 0; 1469 wakeup(&cnt.v_free_count); 1470 } 1471} 1472 1473/* 1474 * vm_page_free_toq: 1475 * 1476 * Returns the given page to the free list, 1477 * disassociating it with any VM object. 1478 * 1479 * Object and page must be locked prior to entry. 1480 * This routine may not block. 
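 *	Most callers reach this routine through the vm_page_free() and
 *	vm_page_free_zero() wrappers above; a hedged sketch of the usual
 *	sequence for a managed page (the object is assumed locked):
 *
 *		vm_page_lock(m);
 *		vm_page_free(m);
 *		vm_page_unlock(m);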
1481 */ 1482 1483void 1484vm_page_free_toq(vm_page_t m) 1485{ 1486 1487 if ((m->flags & PG_UNMANAGED) == 0) { 1488 vm_page_lock_assert(m, MA_OWNED); 1489 KASSERT(!pmap_page_is_mapped(m), 1490 ("vm_page_free_toq: freeing mapped page %p", m)); 1491 } 1492 PCPU_INC(cnt.v_tfree); 1493 1494 if (m->busy || VM_PAGE_IS_FREE(m)) { 1495 printf( 1496 "vm_page_free: pindex(%lu), busy(%d), VPO_BUSY(%d), hold(%d)\n", 1497 (u_long)m->pindex, m->busy, (m->oflags & VPO_BUSY) ? 1 : 0, 1498 m->hold_count); 1499 if (VM_PAGE_IS_FREE(m)) 1500 panic("vm_page_free: freeing free page"); 1501 else 1502 panic("vm_page_free: freeing busy page"); 1503 } 1504 1505 /* 1506 * unqueue, then remove page. Note that we cannot destroy 1507 * the page here because we do not want to call the pager's 1508 * callback routine until after we've put the page on the 1509 * appropriate free queue. 1510 */ 1511 if ((m->flags & PG_UNMANAGED) == 0) 1512 vm_pageq_remove(m); 1513 vm_page_remove(m); 1514 1515 /* 1516 * If fictitious remove object association and 1517 * return, otherwise delay object association removal. 1518 */ 1519 if ((m->flags & PG_FICTITIOUS) != 0) { 1520 return; 1521 } 1522 1523 m->valid = 0; 1524 vm_page_undirty(m); 1525 1526 if (m->wire_count != 0) { 1527 if (m->wire_count > 1) { 1528 panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx", 1529 m->wire_count, (long)m->pindex); 1530 } 1531 panic("vm_page_free: freeing wired page"); 1532 } 1533 if (m->hold_count != 0) { 1534 m->flags &= ~PG_ZERO; 1535 vm_page_lock_queues(); 1536 vm_page_enqueue(PQ_HOLD, m); 1537 vm_page_unlock_queues(); 1538 } else { 1539 /* 1540 * Restore the default memory attribute to the page. 1541 */ 1542 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 1543 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 1544 1545 /* 1546 * Insert the page into the physical memory allocator's 1547 * cache/free page queues. 1548 */ 1549 mtx_lock(&vm_page_queue_free_mtx); 1550 m->flags |= PG_FREE; 1551 cnt.v_free_count++; 1552#if VM_NRESERVLEVEL > 0 1553 if (!vm_reserv_free_page(m)) 1554#else 1555 if (TRUE) 1556#endif 1557 vm_phys_free_pages(m, 0); 1558 if ((m->flags & PG_ZERO) != 0) 1559 ++vm_page_zero_count; 1560 else 1561 vm_page_zero_idle_wakeup(); 1562 vm_page_free_wakeup(); 1563 mtx_unlock(&vm_page_queue_free_mtx); 1564 } 1565} 1566 1567/* 1568 * vm_page_wire: 1569 * 1570 * Mark this page as wired down by yet 1571 * another map, removing it from paging queues 1572 * as necessary. 1573 * 1574 * The page must be locked. 1575 * This routine may not block. 1576 */ 1577void 1578vm_page_wire(vm_page_t m) 1579{ 1580 1581 /* 1582 * Only bump the wire statistics if the page is not already wired, 1583 * and only unqueue the page if it is on some queue (if it is unmanaged 1584 * it is already off the queues). 1585 */ 1586 vm_page_lock_assert(m, MA_OWNED); 1587 if (m->flags & PG_FICTITIOUS) 1588 return; 1589 if (m->wire_count == 0) { 1590 if ((m->flags & PG_UNMANAGED) == 0) 1591 vm_pageq_remove(m); 1592 atomic_add_int(&cnt.v_wire_count, 1); 1593 } 1594 m->wire_count++; 1595 KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m)); 1596} 1597 1598/* 1599 * vm_page_unwire: 1600 * 1601 * Release one wiring of this page, potentially 1602 * enabling it to be paged again. 1603 * 1604 * Many pages placed on the inactive queue should actually go 1605 * into the cache, but it is difficult to figure out which. What 1606 * we do instead, if the inactive target is well met, is to put 1607 * clean pages at the head of the inactive queue instead of the tail. 
1608 * This will cause them to be moved to the cache more quickly and 1609 * if not actively re-referenced, freed more quickly. If we just 1610 * stick these pages at the end of the inactive queue, heavy filesystem 1611 * meta-data accesses can cause an unnecessary paging load on memory bound 1612 * processes. This optimization causes one-time-use metadata to be 1613 * reused more quickly. 1614 * 1615 * BUT, if we are in a low-memory situation we have no choice but to 1616 * put clean pages on the cache queue. 1617 * 1618 * A number of routines use vm_page_unwire() to guarantee that the page 1619 * will go into either the inactive or active queues, and will NEVER 1620 * be placed in the cache - for example, just after dirtying a page. 1621 * dirty pages in the cache are not allowed. 1622 * 1623 * The page must be locked. 1624 * This routine may not block. 1625 */ 1626void 1627vm_page_unwire(vm_page_t m, int activate) 1628{ 1629 1630 if ((m->flags & PG_UNMANAGED) == 0) 1631 vm_page_lock_assert(m, MA_OWNED); 1632 if (m->flags & PG_FICTITIOUS) 1633 return; 1634 if (m->wire_count > 0) { 1635 m->wire_count--; 1636 if (m->wire_count == 0) { 1637 atomic_subtract_int(&cnt.v_wire_count, 1); 1638 if ((m->flags & PG_UNMANAGED) != 0) 1639 return; 1640 vm_page_lock_queues(); 1641 if (activate) 1642 vm_page_enqueue(PQ_ACTIVE, m); 1643 else { 1644 vm_page_flag_clear(m, PG_WINATCFLS); 1645 vm_page_enqueue(PQ_INACTIVE, m); 1646 } 1647 vm_page_unlock_queues(); 1648 } 1649 } else { 1650 panic("vm_page_unwire: invalid wire count: %d", m->wire_count); 1651 } 1652} 1653 1654/* 1655 * Move the specified page to the inactive queue. 1656 * 1657 * Normally athead is 0 resulting in LRU operation. athead is set 1658 * to 1 if we want this page to be 'as if it were placed in the cache', 1659 * except without unmapping it from the process address space. 1660 * 1661 * This routine may not block. 1662 */ 1663static inline void 1664_vm_page_deactivate(vm_page_t m, int athead) 1665{ 1666 int queue; 1667 1668 vm_page_lock_assert(m, MA_OWNED); 1669 1670 /* 1671 * Ignore if already inactive. 1672 */ 1673 if ((queue = VM_PAGE_GETKNOWNQUEUE2(m)) == PQ_INACTIVE) 1674 return; 1675 if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) { 1676 vm_page_lock_queues(); 1677 vm_page_flag_clear(m, PG_WINATCFLS); 1678 if (queue != PQ_NONE) 1679 vm_page_queue_remove(queue, m); 1680 if (athead) 1681 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, 1682 pageq); 1683 else 1684 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, 1685 pageq); 1686 VM_PAGE_SETQUEUE2(m, PQ_INACTIVE); 1687 cnt.v_inactive_count++; 1688 vm_page_unlock_queues(); 1689 } 1690} 1691 1692/* 1693 * Move the specified page to the inactive queue. 1694 * 1695 * The page must be locked. 1696 */ 1697void 1698vm_page_deactivate(vm_page_t m) 1699{ 1700 1701 _vm_page_deactivate(m, 0); 1702} 1703 1704/* 1705 * vm_page_try_to_cache: 1706 * 1707 * Returns 0 on failure, 1 on success 1708 */ 1709int 1710vm_page_try_to_cache(vm_page_t m) 1711{ 1712 1713 vm_page_lock_assert(m, MA_OWNED); 1714 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1715 if (m->dirty || m->hold_count || m->busy || m->wire_count || 1716 (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) 1717 return (0); 1718 pmap_remove_all(m); 1719 if (m->dirty) 1720 return (0); 1721 vm_page_cache(m); 1722 return (1); 1723} 1724 1725/* 1726 * vm_page_try_to_free() 1727 * 1728 * Attempt to free the page. If we cannot free it, we do nothing. 1729 * 1 is returned on success, 0 on failure. 
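 *	A hedged sketch of a caller releasing a page it no longer wants
 *	(the object lock and the page lock are assumed to be held, per the
 *	assertions below; the deactivate fallback is illustrative):
 *
 *		if (!vm_page_try_to_free(m))
 *			vm_page_deactivate(m);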
1730 */ 1731int 1732vm_page_try_to_free(vm_page_t m) 1733{ 1734 1735 vm_page_lock_assert(m, MA_OWNED); 1736 if (m->object != NULL) 1737 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1738 if (m->dirty || m->hold_count || m->busy || m->wire_count || 1739 (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) 1740 return (0); 1741 pmap_remove_all(m); 1742 if (m->dirty) 1743 return (0); 1744 vm_page_free(m); 1745 return (1); 1746} 1747 1748/* 1749 * vm_page_cache 1750 * 1751 * Put the specified page onto the page cache queue (if appropriate). 1752 * 1753 * This routine may not block. 1754 */ 1755void 1756vm_page_cache(vm_page_t m) 1757{ 1758 vm_object_t object; 1759 vm_page_t root; 1760 1761 vm_page_lock_assert(m, MA_OWNED); 1762 object = m->object; 1763 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 1764 if ((m->flags & PG_UNMANAGED) || (m->oflags & VPO_BUSY) || m->busy || 1765 m->hold_count || m->wire_count) 1766 panic("vm_page_cache: attempting to cache busy page"); 1767 pmap_remove_all(m); 1768 if (m->dirty != 0) 1769 panic("vm_page_cache: page %p is dirty", m); 1770 if (m->valid == 0 || object->type == OBJT_DEFAULT || 1771 (object->type == OBJT_SWAP && 1772 !vm_pager_has_page(object, m->pindex, NULL, NULL))) { 1773 /* 1774 * Hypothesis: A cache-elgible page belonging to a 1775 * default object or swap object but without a backing 1776 * store must be zero filled. 1777 */ 1778 vm_page_free(m); 1779 return; 1780 } 1781 KASSERT((m->flags & PG_CACHED) == 0, 1782 ("vm_page_cache: page %p is already cached", m)); 1783 PCPU_INC(cnt.v_tcached); 1784 1785 /* 1786 * Remove the page from the paging queues. 1787 */ 1788 vm_pageq_remove(m); 1789 1790 /* 1791 * Remove the page from the object's collection of resident 1792 * pages. 1793 */ 1794 if (m != object->root) 1795 vm_page_splay(m->pindex, object->root); 1796 if (m->left == NULL) 1797 root = m->right; 1798 else { 1799 root = vm_page_splay(m->pindex, m->left); 1800 root->right = m->right; 1801 } 1802 object->root = root; 1803 TAILQ_REMOVE(&object->memq, m, listq); 1804 object->resident_page_count--; 1805 object->generation++; 1806 1807 /* 1808 * Restore the default memory attribute to the page. 1809 */ 1810 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) 1811 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); 1812 1813 /* 1814 * Insert the page into the object's collection of cached pages 1815 * and the physical memory allocator's cache/free page queues. 1816 */ 1817 m->flags &= ~PG_ZERO; 1818 mtx_lock(&vm_page_queue_free_mtx); 1819 m->flags |= PG_CACHED; 1820 cnt.v_cache_count++; 1821 root = object->cache; 1822 if (root == NULL) { 1823 m->left = NULL; 1824 m->right = NULL; 1825 } else { 1826 root = vm_page_splay(m->pindex, root); 1827 if (m->pindex < root->pindex) { 1828 m->left = root->left; 1829 m->right = root; 1830 root->left = NULL; 1831 } else if (__predict_false(m->pindex == root->pindex)) 1832 panic("vm_page_cache: offset already cached"); 1833 else { 1834 m->right = root->right; 1835 m->left = root; 1836 root->right = NULL; 1837 } 1838 } 1839 object->cache = m; 1840#if VM_NRESERVLEVEL > 0 1841 if (!vm_reserv_free_page(m)) { 1842#else 1843 if (TRUE) { 1844#endif 1845 vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0); 1846 vm_phys_free_pages(m, 0); 1847 } 1848 vm_page_free_wakeup(); 1849 mtx_unlock(&vm_page_queue_free_mtx); 1850 1851 /* 1852 * Increment the vnode's hold count if this is the object's only 1853 * cached page. Decrement the vnode's hold count if this was 1854 * the object's only resident page. 
1855 */ 1856 if (object->type == OBJT_VNODE) { 1857 if (root == NULL && object->resident_page_count != 0) 1858 vhold(object->handle); 1859 else if (root != NULL && object->resident_page_count == 0) 1860 vdrop(object->handle); 1861 } 1862} 1863 1864/* 1865 * vm_page_dontneed 1866 * 1867 * Cache, deactivate, or do nothing as appropriate. This routine 1868 * is typically used by madvise() MADV_DONTNEED. 1869 * 1870 * Generally speaking we want to move the page into the cache so 1871 * it gets reused quickly. However, this can result in a silly syndrome 1872 * due to the page recycling too quickly. Small objects will not be 1873 * fully cached. On the otherhand, if we move the page to the inactive 1874 * queue we wind up with a problem whereby very large objects 1875 * unnecessarily blow away our inactive and cache queues. 1876 * 1877 * The solution is to move the pages based on a fixed weighting. We 1878 * either leave them alone, deactivate them, or move them to the cache, 1879 * where moving them to the cache has the highest weighting. 1880 * By forcing some pages into other queues we eventually force the 1881 * system to balance the queues, potentially recovering other unrelated 1882 * space from active. The idea is to not force this to happen too 1883 * often. 1884 */ 1885void 1886vm_page_dontneed(vm_page_t m) 1887{ 1888 int dnw; 1889 int head; 1890 1891 vm_page_lock_assert(m, MA_OWNED); 1892 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1893 dnw = PCPU_GET(dnweight); 1894 PCPU_INC(dnweight); 1895 1896 /* 1897 * occassionally leave the page alone 1898 */ 1899 if ((dnw & 0x01F0) == 0 || 1900 VM_PAGE_INQUEUE2(m, PQ_INACTIVE)) { 1901 if (m->act_count >= ACT_INIT) 1902 --m->act_count; 1903 return; 1904 } 1905 1906 /* 1907 * Clear any references to the page. Otherwise, the page daemon will 1908 * immediately reactivate the page. 1909 */ 1910 vm_page_lock_queues(); 1911 vm_page_flag_clear(m, PG_REFERENCED); 1912 vm_page_unlock_queues(); 1913 pmap_clear_reference(m); 1914 1915 if (m->dirty == 0 && pmap_is_modified(m)) 1916 vm_page_dirty(m); 1917 1918 if (m->dirty || (dnw & 0x0070) == 0) { 1919 /* 1920 * Deactivate the page 3 times out of 32. 1921 */ 1922 head = 0; 1923 } else { 1924 /* 1925 * Cache the page 28 times out of every 32. Note that 1926 * the page is deactivated instead of cached, but placed 1927 * at the head of the queue instead of the tail. 1928 */ 1929 head = 1; 1930 } 1931 _vm_page_deactivate(m, head); 1932} 1933 1934/* 1935 * Grab a page, waiting until we are waken up due to the page 1936 * changing state. We keep on waiting, if the page continues 1937 * to be in the object. If the page doesn't exist, first allocate it 1938 * and then conditionally zero it. 1939 * 1940 * This routine may block. 1941 */ 1942vm_page_t 1943vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags) 1944{ 1945 vm_page_t m; 1946 1947 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 1948retrylookup: 1949 if ((m = vm_page_lookup(object, pindex)) != NULL) { 1950 if ((m->oflags & VPO_BUSY) != 0 || m->busy != 0) { 1951 if ((allocflags & VM_ALLOC_RETRY) != 0) { 1952 /* 1953 * Reference the page before unlocking and 1954 * sleeping so that the page daemon is less 1955 * likely to reclaim it. 
1956 */ 1957 vm_page_lock_queues(); 1958 vm_page_flag_set(m, PG_REFERENCED); 1959 } 1960 vm_page_sleep(m, "pgrbwt"); 1961 if ((allocflags & VM_ALLOC_RETRY) == 0) 1962 return (NULL); 1963 goto retrylookup; 1964 } else { 1965 if ((allocflags & VM_ALLOC_WIRED) != 0) { 1966 vm_page_lock(m); 1967 vm_page_wire(m); 1968 vm_page_unlock(m); 1969 } 1970 if ((allocflags & VM_ALLOC_NOBUSY) == 0) 1971 vm_page_busy(m); 1972 return (m); 1973 } 1974 } 1975 m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY); 1976 if (m == NULL) { 1977 VM_OBJECT_UNLOCK(object); 1978 VM_WAIT; 1979 VM_OBJECT_LOCK(object); 1980 if ((allocflags & VM_ALLOC_RETRY) == 0) 1981 return (NULL); 1982 goto retrylookup; 1983 } else if (m->valid != 0) 1984 return (m); 1985 if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0) 1986 pmap_zero_page(m); 1987 return (m); 1988} 1989 1990/* 1991 * Mapping function for valid bits or for dirty bits in 1992 * a page. May not block. 1993 * 1994 * Inputs are required to range within a page. 1995 */ 1996int 1997vm_page_bits(int base, int size) 1998{ 1999 int first_bit; 2000 int last_bit; 2001 2002 KASSERT( 2003 base + size <= PAGE_SIZE, 2004 ("vm_page_bits: illegal base/size %d/%d", base, size) 2005 ); 2006 2007 if (size == 0) /* handle degenerate case */ 2008 return (0); 2009 2010 first_bit = base >> DEV_BSHIFT; 2011 last_bit = (base + size - 1) >> DEV_BSHIFT; 2012 2013 return ((2 << last_bit) - (1 << first_bit)); 2014} 2015 2016/* 2017 * vm_page_set_valid: 2018 * 2019 * Sets portions of a page valid. The arguments are expected 2020 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 2021 * of any partial chunks touched by the range. The invalid portion of 2022 * such chunks will be zeroed. 2023 * 2024 * (base + size) must be less then or equal to PAGE_SIZE. 2025 */ 2026void 2027vm_page_set_valid(vm_page_t m, int base, int size) 2028{ 2029 int endoff, frag; 2030 2031 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2032 if (size == 0) /* handle degenerate case */ 2033 return; 2034 2035 /* 2036 * If the base is not DEV_BSIZE aligned and the valid 2037 * bit is clear, we have to zero out a portion of the 2038 * first block. 2039 */ 2040 if ((frag = base & ~(DEV_BSIZE - 1)) != base && 2041 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0) 2042 pmap_zero_page_area(m, frag, base - frag); 2043 2044 /* 2045 * If the ending offset is not DEV_BSIZE aligned and the 2046 * valid bit is clear, we have to zero out a portion of 2047 * the last block. 2048 */ 2049 endoff = base + size; 2050 if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff && 2051 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0) 2052 pmap_zero_page_area(m, endoff, 2053 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); 2054 2055 /* 2056 * Assert that no previously invalid block that is now being validated 2057 * is already dirty. 2058 */ 2059 KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0, 2060 ("vm_page_set_valid: page %p is dirty", m)); 2061 2062 /* 2063 * Set valid bits inclusive of any overlap. 2064 */ 2065 m->valid |= vm_page_bits(base, size); 2066} 2067 2068/* 2069 * vm_page_set_validclean: 2070 * 2071 * Sets portions of a page valid and clean. The arguments are expected 2072 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive 2073 * of any partial chunks touched by the range. The invalid portion of 2074 * such chunks will be zero'd. 2075 * 2076 * This routine may not block. 2077 * 2078 * (base + size) must be less then or equal to PAGE_SIZE. 
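 *	Worked example, assuming PAGE_SIZE is 4096 and DEV_BSIZE is 512:
 *	a call with base 512 and size 1024 covers blocks 1 and 2, so
 *	vm_page_bits() yields (2 << 2) - (1 << 1) = 0x06; those two bits
 *	are set in m->valid and cleared in m->dirty.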
2068/*
2069 * vm_page_set_validclean:
2070 *
2071 * Sets portions of a page valid and clean. The arguments are expected
2072 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
2073 * of any partial chunks touched by the range. The invalid portion of
2074 * such chunks will be zeroed.
2075 *
2076 * This routine may not block.
2077 *
2078 * (base + size) must be less than or equal to PAGE_SIZE.
2079 */
2080void
2081vm_page_set_validclean(vm_page_t m, int base, int size)
2082{
2083    int pagebits;
2084    int frag;
2085    int endoff;
2086
2087    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2088    if (size == 0)      /* handle degenerate case */
2089        return;
2090
2091    /*
2092     * If the base is not DEV_BSIZE aligned and the valid
2093     * bit is clear, we have to zero out a portion of the
2094     * first block.
2095     */
2096    if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2097        (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
2098        pmap_zero_page_area(m, frag, base - frag);
2099
2100    /*
2101     * If the ending offset is not DEV_BSIZE aligned and the
2102     * valid bit is clear, we have to zero out a portion of
2103     * the last block.
2104     */
2105    endoff = base + size;
2106    if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2107        (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
2108        pmap_zero_page_area(m, endoff,
2109            DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2110
2111    /*
2112     * Set valid, clear dirty bits. If validating the entire
2113     * page we can safely clear the pmap modify bit. We also
2114     * use this opportunity to clear the VPO_NOSYNC flag. If a process
2115     * takes a write fault on a MAP_NOSYNC memory area the flag will
2116     * be set again.
2117     *
2118     * We set valid bits inclusive of any overlap, but we can only
2119     * clear dirty bits for DEV_BSIZE chunks that are fully within
2120     * the range.
2121     */
2122    pagebits = vm_page_bits(base, size);
2123    m->valid |= pagebits;
2124#if 0   /* NOT YET */
2125    if ((frag = base & (DEV_BSIZE - 1)) != 0) {
2126        frag = DEV_BSIZE - frag;
2127        base += frag;
2128        size -= frag;
2129        if (size < 0)
2130            size = 0;
2131    }
2132    pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
2133#endif
2134    m->dirty &= ~pagebits;
2135    if (base == 0 && size == PAGE_SIZE) {
2136        pmap_clear_modify(m);
2137        m->oflags &= ~VPO_NOSYNC;
2138    }
2139}
2140
2141void
2142vm_page_clear_dirty(vm_page_t m, int base, int size)
2143{
2144
2145    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2146    m->dirty &= ~vm_page_bits(base, size);
2147}
2148
2149/*
2150 * vm_page_set_invalid:
2151 *
2152 * Invalidates DEV_BSIZE'd chunks within a page. Both the
2153 * valid and dirty bits for the affected areas are cleared.
2154 *
2155 * May not block.
2156 */
2157void
2158vm_page_set_invalid(vm_page_t m, int base, int size)
2159{
2160    int bits;
2161
2162    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2163    KASSERT((m->oflags & VPO_BUSY) == 0,
2164        ("vm_page_set_invalid: page %p is busy", m));
2165    bits = vm_page_bits(base, size);
2166    if (m->valid == VM_PAGE_BITS_ALL && bits != 0)
2167        pmap_remove_all(m);
2168    KASSERT(!pmap_page_is_mapped(m),
2169        ("vm_page_set_invalid: page %p is mapped", m));
2170    m->valid &= ~bits;
2171    m->dirty &= ~bits;
2172    m->object->generation++;
2173}
2174
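/*
 * Illustrative sketch (not part of the original source): how a pager might
 * use the helpers above while holding the object lock.  The offset "off" is
 * hypothetical and assumed to be DEV_BSIZE aligned.
 *
 *    After reading one DEV_BSIZE block at offset "off" into page m:
 *        vm_page_set_validclean(m, off, DEV_BSIZE);
 *
 *    After a truncation makes that block meaningless:
 *        vm_page_set_invalid(m, off, DEV_BSIZE);
 *
 * Note that vm_page_set_invalid() asserts that the page is not busy and
 * removes all mappings of a fully valid page before clearing its bits.
 */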
2175/*
2176 * vm_page_zero_invalid()
2177 *
2178 * The kernel assumes that the invalid portions of a page contain
2179 * garbage, but such pages can be mapped into memory by user code.
2180 * When this occurs, we must zero out the non-valid portions of the
2181 * page so user code sees what it expects.
2182 *
2183 * Pages are most often semi-valid when the end of a file is mapped
2184 * into memory and the file's size is not page aligned.
2185 */
2186void
2187vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
2188{
2189    int b;
2190    int i;
2191
2192    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2193    /*
2194     * Scan the valid bits looking for invalid sections that
2195     * must be zeroed. Invalid sub-DEV_BSIZE'd areas (where the
2196     * valid bit may be set) have already been zeroed by
2197     * vm_page_set_validclean().
2198     */
2199    for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
2200        if (i == (PAGE_SIZE / DEV_BSIZE) ||
2201            (m->valid & (1 << i))
2202        ) {
2203            if (i > b) {
2204                pmap_zero_page_area(m,
2205                    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
2206            }
2207            b = i + 1;
2208        }
2209    }
2210
2211    /*
2212     * setvalid is TRUE when we can safely set the zeroed areas
2213     * as being valid. We can do this if there are no cache consistency
2214     * issues, e.g., it is ok to do with UFS, but not ok to do with NFS.
2215     */
2216    if (setvalid)
2217        m->valid = VM_PAGE_BITS_ALL;
2218}
2219
2220/*
2221 * vm_page_is_valid:
2222 *
2223 * Is (partial) page valid? Note that in the degenerate case where
2224 * size == 0, the result is FALSE if the page is entirely invalid
2225 * and TRUE otherwise.
2226 *
2227 * May not block.
2228 */
2229int
2230vm_page_is_valid(vm_page_t m, int base, int size)
2231{
2232    int bits = vm_page_bits(base, size);
2233
2234    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2235    if (m->valid && ((m->valid & bits) == bits))
2236        return 1;
2237    else
2238        return 0;
2239}
2240
2241/*
2242 * update dirty bits from pmap/mmu. May not block.
2243 */
2244void
2245vm_page_test_dirty(vm_page_t m)
2246{
2247
2248    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2249    if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
2250        vm_page_dirty(m);
2251}
2252
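/*
 * Worked example (illustrative, not part of the original source), assuming
 * PAGE_SIZE == 4096 and DEV_BSIZE == 512, i.e. eight valid bits per page:
 *
 *    A file that ends 1536 bytes into its last page leaves that page with
 *    m->valid == 0x07 (chunks 0-2 valid).  vm_page_zero_invalid() then
 *    zeroes the byte range [1536, 4096) in one pmap_zero_page_area() call
 *    and, with setvalid TRUE, sets m->valid to VM_PAGE_BITS_ALL.  Before
 *    that, vm_page_is_valid(m, 0, 1536) returns TRUE while
 *    vm_page_is_valid(m, 0, 2048) returns FALSE.
 */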
2253int so_zerocp_fullpage = 0;
2254
2255/*
2256 * Replace the given page with a copy. The copied page assumes
2257 * the portion of the given page's "wire_count" that is not the
2258 * responsibility of this copy-on-write mechanism.
2259 *
2260 * The object containing the given page must have a non-zero
2261 * paging-in-progress count and be locked.
2262 */
2263void
2264vm_page_cowfault(vm_page_t m)
2265{
2266    vm_page_t mnew;
2267    vm_object_t object;
2268    vm_pindex_t pindex;
2269
2270    mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);
2271    vm_page_lock_assert(m, MA_OWNED);
2272    object = m->object;
2273    VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2274    KASSERT(object->paging_in_progress != 0,
2275        ("vm_page_cowfault: object %p's paging-in-progress count is zero.",
2276        object));
2277    pindex = m->pindex;
2278
2279 retry_alloc:
2280    pmap_remove_all(m);
2281    vm_page_remove(m);
2282    mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
2283    if (mnew == NULL) {
2284        vm_page_insert(m, object, pindex);
2285        vm_page_unlock(m);
2286        VM_OBJECT_UNLOCK(object);
2287        VM_WAIT;
2288        VM_OBJECT_LOCK(object);
2289        if (m == vm_page_lookup(object, pindex)) {
2290            vm_page_lock(m);
2291            goto retry_alloc;
2292        } else {
2293            /*
2294             * Page disappeared during the wait.
2295             */
2296            return;
2297        }
2298    }
2299
2300    if (m->cow == 0) {
2301        /*
2302         * check to see if we raced with an xmit complete when
2303         * waiting to allocate a page. If so, put things back
2304         * the way they were
2305         */
2306        vm_page_unlock(m);
2307        vm_page_lock(mnew);
2308        vm_page_free(mnew);
2309        vm_page_unlock(mnew);
2310        vm_page_insert(m, object, pindex);
2311    } else { /* clear COW & copy page */
2312        if (!so_zerocp_fullpage)
2313            pmap_copy_page(m, mnew);
2314        mnew->valid = VM_PAGE_BITS_ALL;
2315        vm_page_dirty(mnew);
2316        mnew->wire_count = m->wire_count - m->cow;
2317        m->wire_count = m->cow;
2318        vm_page_unlock(m);
2319    }
2320}
2321
2322void
2323vm_page_cowclear(vm_page_t m)
2324{
2325
2326    vm_page_lock_assert(m, MA_OWNED);
2327    if (m->cow) {
2328        m->cow--;
2329        /*
2330         * let vm_fault add back write permission lazily
2331         */
2332    }
2333    /*
2334     * sf_buf_free() will free the page, so we needn't do it here
2335     */
2336}
2337
2338int
2339vm_page_cowsetup(vm_page_t m)
2340{
2341
2342    vm_page_lock_assert(m, MA_OWNED);
2343    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
2344        m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
2345        return (EBUSY);
2346    m->cow++;
2347    pmap_remove_write(m);
2348    VM_OBJECT_UNLOCK(m->object);
2349    return (0);
2350}
2351
2352#include "opt_ddb.h"
2353#ifdef DDB
2354#include <sys/kernel.h>
2355
2356#include <ddb/ddb.h>
2357
2358DB_SHOW_COMMAND(page, vm_page_print_page_info)
2359{
2360    db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
2361    db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
2362    db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
2363    db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
2364    db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
2365    db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
2366    db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
2367    db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
2368    db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
2369    db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
2370}
2371
2372DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
2373{
2374
2375    db_printf("PQ_FREE:");
2376    db_printf(" %d", cnt.v_free_count);
2377    db_printf("\n");
2378
2379    db_printf("PQ_CACHE:");
2380    db_printf(" %d", cnt.v_cache_count);
2381    db_printf("\n");
2382
2383    db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
2384        *vm_page_queues[PQ_ACTIVE].cnt,
2385        *vm_page_queues[PQ_INACTIVE].cnt);
2386}
2387#endif /* DDB */
2388
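/*
 * Illustrative sketch (not part of the original source): how a zero-copy
 * transmit path might use the copy-on-write helpers above.  The page "m"
 * is assumed to be a managed page that the caller has already wired.
 *
 *    vm_page_lock(m);
 *    if (vm_page_cowsetup(m) != 0) {
 *        ... fall back to copying the data instead ...
 *    }
 *    vm_page_unlock(m);
 *
 * On transmit completion the path calls vm_page_cowclear() with the page
 * lock held.  If the owning process wrote to the page in the meantime,
 * vm_page_cowfault() has already given it a private copy and transferred
 * the appropriate share of the wire count to that copy.
 */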