/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_page.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Resident memory system definitions.
 */

#ifndef	_VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#include <debug.h>
#include <vm/vm_options.h>

#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <vm/vm_object.h>
#include <kern/queue.h>
#include <kern/locks.h>

#include <kern/macro_help.h>
#include <libkern/OSAtomic.h>


/*
 * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q
 * represents a set of aging bins that are 'protected'...
 *
 * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
 * not yet been 'claimed' but have been aged out of the protective bins.
 * this occurs in vm_page_speculate when it advances to the next bin
 * and discovers that it is still occupied... at that point, all of the
 * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q.  the pages
 * in that bin are all guaranteed to have reached at least the maximum age
 * we allow for a protected page... they can be older if there is no
 * memory pressure to pull them from the bin, or there are no new speculative pages
 * being generated to push them out.
 * this list is the one that vm_pageout_scan will prefer when looking
 * for pages to move to the underweight free list
 *
 * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
 * defines the amount of time a speculative page is normally
 * allowed to live in the 'protected' state (i.e. not available
 * to be stolen if vm_pageout_scan is running and looking for
 * pages)...  however, if the total number of speculative pages
 * in the protected state exceeds our limit (defined in vm_pageout.c)
 * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
 * vm_pageout_scan is allowed to steal pages from the protected
 * bucket even if they are underage.
 *
 * vm_pageout_scan is also allowed to pull pages from a protected
 * bin if the bin has reached the "age of consent" we've set
 */
#define VM_PAGE_MAX_SPECULATIVE_AGE_Q	10
#define VM_PAGE_MIN_SPECULATIVE_AGE_Q	1
#define VM_PAGE_SPECULATIVE_AGED_Q	0

#define VM_PAGE_SPECULATIVE_Q_AGE_MS	500

struct vm_speculative_age_q {
	/*
	 * memory queue for speculative pages via clustered pageins
	 */
	queue_head_t	age_q;
	mach_timespec_t	age_ts;
};



extern
struct vm_speculative_age_q	vm_page_queue_speculative[];

extern int			speculative_steal_index;
extern int			speculative_age_index;
extern unsigned int		vm_page_speculative_q_age_ms;
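/*
 * Illustrative arithmetic (a sketch, not part of the original interface):
 * with the defaults above, a speculative page normally stays 'protected'
 * for up to VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
 * = 10 * 500 ms = 5 seconds before it is eligible to age into
 * VM_PAGE_SPECULATIVE_AGED_Q.  The helper below is hypothetical, shown
 * only to make the lifetime computation concrete.
 */
static inline unsigned int
vm_page_speculative_protected_ms_example(void)
{
	return (VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS);
}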
#define	VM_PAGE_COMPRESSOR_COUNT	(compressor_object->resident_page_count)

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).  [Some fields require that both locks be held to
 *	change that field; holding either lock is sufficient to read.]
 */


#if defined(__LP64__)

/*
 * in order to make the size of a vm_page_t 64 bytes (cache line size for both arm64 and x86_64)
 * we'll keep the next_m pointer packed... as long as the kernel virtual space where we allocate
 * vm_page_t's from doesn't span more than 256 Gbytes, we're safe.  There are live tests in the
 * vm_page_t array allocation and the zone init code to determine if we can safely pack and unpack
 * pointers from the 2 ends of these spaces
 */
typedef uint32_t	vm_page_packed_t;

#define	VM_PAGE_PACK_PTR(m)	(!(m) ? (vm_page_packed_t)0 : ((vm_page_packed_t)((uintptr_t)(((uintptr_t)(m) - (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS)) >> 6)))
#define	VM_PAGE_UNPACK_PTR(p)	(!(p) ? VM_PAGE_NULL : ((vm_page_t)((((uintptr_t)(p)) << 6) + (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS)))

#else

/*
 * we can't do the packing trick on 32 bit architectures, so
 * just turn the macros into noops.
 */
typedef struct vm_page	*vm_page_packed_t;

#define	VM_PAGE_PACK_PTR(m)	((vm_page_packed_t)(m))
#define	VM_PAGE_UNPACK_PTR(p)	((vm_page_t)(p))

#endif
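/*
 * Worked example (illustrative sketch, not used by the VM system itself):
 * packing works because vm_page structures are 64-byte sized/aligned, so
 * the low 6 bits of their addresses carry no information.  Subtracting
 * VM_MIN_KERNEL_AND_KEXT_ADDRESS and shifting right by 6 leaves a 32-bit
 * token that can span 2^32 * 64 bytes = 256 Gbytes; unpacking reverses
 * the shift and re-adds the base.
 */
#if 0
	struct vm_page		*m;	/* some page in the packable range */
	vm_page_packed_t	packed;

	packed = VM_PAGE_PACK_PTR(m);			/* 64-bit ptr -> 32-bit token */
	assert(VM_PAGE_UNPACK_PTR(packed) == m);	/* lossless round trip */
#endif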
struct vm_page {
	queue_chain_t	pageq;		/* queue info for FIFO */
					/* queue or free list (P) */

	queue_chain_t	listq;		/* all pages in same object (O) */

	vm_object_offset_t offset;	/* offset into that object (O,P) */
	vm_object_t	object;		/* which object am I in (O&P) */

	vm_page_packed_t next_m;	/* VP bucket link (O) */
	/*
	 * The following word of flags is protected
	 * by the "page queues" lock.
	 *
	 * we use the 'wire_count' field to store the local
	 * queue id if local queues are enabled...
	 * see the comments at 'VM_PAGE_QUEUES_REMOVE' as to
	 * why this is safe to do
	 */
#define local_id wire_count
	unsigned int	wire_count:16,	/* how many wired down maps use me? (O&P) */
	/* boolean_t */	active:1,	/* page is in active list (P) */
			inactive:1,	/* page is in inactive list (P) */
			clean_queue:1,	/* page is in pre-cleaned list (P) */
			local:1,	/* page is in one of the local queues (P) */
			speculative:1,	/* page is in speculative list (P) */
			throttled:1,	/* pager is not responding or doesn't exist (P) */
			free:1,		/* page is on free list (P) */
			pageout_queue:1, /* page is on queue for pageout (P) */
			laundry:1,	/* page is being cleaned now (P) */
			reference:1,	/* page has been used (P) */
			gobbled:1,	/* page used internally (P) */
			private:1,	/* Page should not be returned to
					 * the free list (P) */
			no_cache:1,	/* page is not to be cached and should
					 * be reused ahead of other pages (P) */

			__unused_pageq_bits:3;	/* 3 bits available here */

	ppnum_t		phys_page;	/* Physical address of page, passed
					 * to pmap_enter (read-only) */

	/*
	 * The following word of flags is protected
	 * by the "VM object" lock.
	 */
	unsigned int
	/* boolean_t */	busy:1,		/* page is in transit (O) */
			wanted:1,	/* someone is waiting for page (O) */
			tabled:1,	/* page is in VP table (O) */
			hashed:1,	/* page is in vm_page_buckets[]
					   (O) + the bucket lock */
			fictitious:1,	/* Physical page doesn't exist (O) */
	/*
	 * IMPORTANT: the "pmapped", "xpmapped" and "clustered" bits can be modified while holding the
	 * VM object "shared" lock + the page lock provided through the pmap_lock_phys_page function.
	 * This is done in vm_fault_enter and the CONSUME_CLUSTERED macro.
	 * It's also ok to modify them behind just the VM object "exclusive" lock.
	 */
			clustered:1,	/* page is not the faulted page (O) or (O-shared AND pmap_page) */
			pmapped:1,	/* page has been entered at some
					 * point into a pmap (O) or (O-shared AND pmap_page) */
			xpmapped:1,	/* page has been entered with execute permission (O)
					   or (O-shared AND pmap_page) */

			wpmapped:1,	/* page has been entered at some
					 * point into a pmap for write (O) */
			pageout:1,	/* page wired & busy for pageout (O) */
			absent:1,	/* Data has been requested, but is
					 * not yet available (O) */
			error:1,	/* Data manager was unable to provide
					 * data due to error (O) */
			dirty:1,	/* Page must be cleaned (O) */
			cleaning:1,	/* Page clean has begun (O) */
			precious:1,	/* Page is precious; data must be
					 * returned even if clean (O) */
			overwriting:1,	/* Request to unlock has been made
					 * without having data. (O)
					 * [See vm_fault_page_overwrite] */
			restart:1,	/* Page was pushed higher in shadow
					   chain by copy_call-related pagers;
					   start again at top of chain */
			unusual:1,	/* Page is absent, error, restart or
					   page locked */
			encrypted:1,	/* encrypted for secure swap (O) */
			encrypted_cleaning:1,	/* encrypting page */
			cs_validated:1,	/* code-signing: page was checked */
			cs_tainted:1,	/* code-signing: page is tainted */
			reusable:1,
			lopage:1,
			slid:1,
			compressor:1,	/* page owned by compressor pool */
			written_by_kernel:1,	/* page was written by kernel (i.e. decompressed) */
			__unused_object_bits:5;	/* 5 bits available here */
};
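/*
 * Locking-convention sketch (illustrative only; the queue-lock macros
 * used here are defined later in this header): (O) fields such as
 * 'dirty' are modified under the page's VM object lock, while (P)
 * fields such as 'active' require the page queues lock.
 */
#if 0
	vm_object_lock(m->object);	/* (O): object-lock protected bits */
	m->dirty = TRUE;
	vm_object_unlock(m->object);

	vm_page_lock_queues();		/* (P): page-queues protected bits */
	vm_page_activate(m);		/* moves page to the active queue */
	vm_page_unlock_queues();
#endif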
#define DEBUG_ENCRYPTED_SWAP	1
#if DEBUG_ENCRYPTED_SWAP
#define ASSERT_PAGE_DECRYPTED(page)					\
	MACRO_BEGIN							\
	if ((page)->encrypted) {					\
		panic("VM page %p should not be encrypted here\n",	\
		      (page));						\
	}								\
	MACRO_END
#else	/* DEBUG_ENCRYPTED_SWAP */
#define ASSERT_PAGE_DECRYPTED(page)	assert(!(page)->encrypted)
#endif	/* DEBUG_ENCRYPTED_SWAP */

typedef struct vm_page	*vm_page_t;


typedef struct vm_locks_array {
	char		pad  __attribute__ ((aligned (64)));
	lck_mtx_t	vm_page_queue_lock2 __attribute__ ((aligned (64)));
	lck_mtx_t	vm_page_queue_free_lock2 __attribute__ ((aligned (64)));
	char		pad2  __attribute__ ((aligned (64)));
} vm_locks_array_t;


#define VM_PAGE_WIRED(m)	((!(m)->local && (m)->wire_count))
#define VM_PAGE_NULL		((vm_page_t) 0)
#define NEXT_PAGE(m)		((vm_page_t) (m)->pageq.next)
#define NEXT_PAGE_PTR(m)	((vm_page_t *) &(m)->pageq.next)

/*
 * XXX	The unusual bit should not be necessary.  Most of the bit
 * XXX	fields above really want to be masks.
 */

/*
 *	For debugging, this macro can be defined to perform
 *	some useful check on a page structure.
 */

#define VM_PAGE_CHECK(mem)			\
	MACRO_BEGIN				\
	VM_PAGE_QUEUES_ASSERT(mem, 1);		\
	MACRO_END

/*	Page coloring:
 *
 *	The free page list is actually n lists, one per color,
 *	where the number of colors is a function of the machine's
 *	cache geometry set at system initialization.  To disable
 *	coloring, set vm_colors to 1 and vm_color_mask to 0.
 *	The boot-arg "colors" may be used to override vm_colors.
 *	Note that there is little harm in having more colors than needed.
 */

#define MAX_COLORS	128
#define DEFAULT_COLORS	32

extern
unsigned int	vm_colors;		/* must be in range 1..MAX_COLORS */
extern
unsigned int	vm_color_mask;		/* must be (vm_colors-1) */
extern
unsigned int	vm_cache_geometry_colors; /* optimal #colors based on cache geometry */
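/*
 * Illustrative helper (an assumption, not part of the original header):
 * since vm_color_mask is (vm_colors - 1), a page's color can be derived
 * by masking its physical page number, which is how an allocator would
 * select one of the per-color free queues declared below.
 */
static inline unsigned int
vm_page_color_example(ppnum_t phys_page)
{
	return (phys_page & vm_color_mask);	/* index into vm_page_queue_free[] */
}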
/*
 * Wired memory is a very limited resource and we can't let users exhaust it
 * and deadlock the entire system.  We enforce the following limits:
 *
 * vm_user_wire_limit (default: all memory minus vm_global_no_user_wire_amount)
 *	how much memory can be user-wired in one user task
 *
 * vm_global_user_wire_limit (default: same as vm_user_wire_limit)
 *	how much memory can be user-wired in all user tasks
 *
 * vm_global_no_user_wire_amount (default: VM_NOT_USER_WIREABLE)
 *	how much memory must remain user-unwired at any time
 */
#define VM_NOT_USER_WIREABLE	(64*1024*1024)	/* 64MB */
extern
vm_map_size_t	vm_user_wire_limit;
extern
vm_map_size_t	vm_global_user_wire_limit;
extern
vm_map_size_t	vm_global_no_user_wire_amount;
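/*
 * Hypothetical enforcement sketch: the names 'request_size',
 * 'task_wired_size' and 'total_user_wired' are illustrative, not part
 * of this interface.  A wire request must fit under both the per-task
 * and the global limits.
 */
#if 0
	if (task_wired_size + request_size > vm_user_wire_limit ||
	    total_user_wired + request_size > vm_global_user_wire_limit)
		return KERN_RESOURCE_SHORTAGE;	/* refuse rather than deadlock */
#endif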
/*
 *	Each pageable resident page falls into one of three lists:
 *
 *	free
 *		Available for allocation now.  The free list is
 *		actually an array of lists, one per color.
 *	inactive
 *		Not referenced in any map, but still has an
 *		object/offset-page mapping, and may be dirty.
 *		This is the list of pages that should be
 *		paged out next.  There are actually two
 *		inactive lists, one for pages brought in from
 *		disk or other backing store, and another
 *		for "zero-filled" pages.  See vm_pageout_scan()
 *		for the distinction and usage.
 *	active
 *		A list of pages which have been placed in
 *		at least one physical map.  This list is
 *		ordered, in LRU-like fashion.
 */


#define VPL_LOCK_SPIN 1

struct vpl {
	unsigned int	vpl_count;
	unsigned int	vpl_internal_count;
	unsigned int	vpl_external_count;
	queue_head_t	vpl_queue;
#ifdef	VPL_LOCK_SPIN
	lck_spin_t	vpl_lock;
#else
	lck_mtx_t	vpl_lock;
	lck_mtx_ext_t	vpl_lock_ext;
#endif
};

struct vplq {
	union {
		char		cache_line_pad[128];
		struct vpl	vpl;
	} vpl_un;
};
extern
unsigned int	vm_page_local_q_count;
extern
struct vplq	*vm_page_local_q;
extern
unsigned int	vm_page_local_q_soft_limit;
extern
unsigned int	vm_page_local_q_hard_limit;
extern
vm_locks_array_t vm_page_locks;

extern
queue_head_t	vm_page_queue_free[MAX_COLORS];	/* memory free queue */
extern
queue_head_t	vm_lopage_queue_free;		/* low memory free queue */
extern
queue_head_t	vm_page_queue_active;	/* active memory queue */
extern
queue_head_t	vm_page_queue_inactive;	/* inactive memory queue for normal pages */
extern
queue_head_t	vm_page_queue_cleaned;	/* clean-queue inactive memory */
extern
queue_head_t	vm_page_queue_anonymous;	/* inactive memory queue for anonymous pages */
extern
queue_head_t	vm_page_queue_throttled;	/* memory queue for throttled pageout pages */

extern
vm_offset_t	first_phys_addr;	/* physical address for first_page */
extern
vm_offset_t	last_phys_addr;		/* physical address for last_page */

extern
unsigned int	vm_page_free_count;	/* How many pages are free? (sum of all colors) */
extern
unsigned int	vm_page_fictitious_count; /* How many fictitious pages are free? */
extern
unsigned int	vm_page_active_count;	/* How many pages are active? */
extern
unsigned int	vm_page_inactive_count;	/* How many pages are inactive? */
extern
unsigned int	vm_page_cleaned_count;	/* How many pages are in the clean queue? */
extern
unsigned int	vm_page_throttled_count; /* How many inactives are throttled */
extern
unsigned int	vm_page_speculative_count;	/* How many speculative pages are unclaimed? */
extern unsigned int	vm_page_pageable_internal_count;
extern unsigned int	vm_page_pageable_external_count;
extern
unsigned int	vm_page_xpmapped_external_count;	/* How many pages are mapped executable? */
extern
unsigned int	vm_page_external_count;	/* How many pages are file-backed? */
extern
unsigned int	vm_page_internal_count;	/* How many pages are anonymous? */
extern
unsigned int	vm_page_wire_count;	/* How many pages are wired? */
extern
unsigned int	vm_page_wire_count_initial;	/* How many pages wired at startup */
extern
unsigned int	vm_page_free_target;	/* How many do we want free? */
extern
unsigned int	vm_page_free_min;	/* When to wakeup pageout */
extern
unsigned int	vm_page_throttle_limit;	/* When to throttle new page creation */
extern
uint32_t	vm_page_creation_throttle;	/* When to throttle new page creation */
extern
unsigned int	vm_page_inactive_target;/* How many do we want inactive? */
extern
unsigned int	vm_page_anonymous_min;	/* When it's ok to pre-clean */
extern
unsigned int	vm_page_inactive_min;	/* When to wakeup pageout */
extern
unsigned int	vm_page_free_reserved;	/* How many pages reserved to do pageout */
extern
unsigned int	vm_page_throttle_count;	/* Count of page allocations throttled */
extern
unsigned int	vm_page_gobble_count;

#if DEVELOPMENT || DEBUG
extern
unsigned int	vm_page_speculative_used;
#endif

extern
unsigned int	vm_page_purgeable_count;/* How many pages are purgeable now ? */
extern
unsigned int	vm_page_purgeable_wired_count;/* How many purgeable pages are wired now ? */
extern
uint64_t	vm_page_purged_count;	/* How many pages got purged so far ? */

extern unsigned int	vm_page_free_wanted;
				/* how many threads are waiting for memory */

extern unsigned int	vm_page_free_wanted_privileged;
				/* how many VM privileged threads are waiting for memory */

extern ppnum_t	vm_page_fictitious_addr;
				/* (fake) phys_addr of fictitious pages */

extern ppnum_t	vm_page_guard_addr;
				/* (fake) phys_addr of guard pages */


extern boolean_t	vm_page_deactivate_hint;

extern int		vm_compressor_mode;

/*
   0 = all pages available (default)
   1 = disable high memory (cap max pages to 4G)
   2 = prefer high memory
*/
extern int		vm_himemory_mode;

extern boolean_t	vm_lopage_needed;
extern uint32_t		vm_lopage_free_count;
extern uint32_t		vm_lopage_free_limit;
extern uint32_t		vm_lopage_lowater;
extern boolean_t	vm_lopage_refill;
extern uint64_t		max_valid_dma_address;
extern ppnum_t		max_valid_low_ppnum;

/*
 * Prototypes for functions exported by this module.
 */
extern void		vm_page_bootstrap(
					vm_offset_t	*startp,
					vm_offset_t	*endp);

extern void		vm_page_module_init(void);

extern void		vm_page_init_local_q(void);

extern void		vm_page_create(
					ppnum_t		start,
					ppnum_t		end);

extern vm_page_t	vm_page_lookup(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern vm_page_t	vm_page_grab_fictitious(void);

extern vm_page_t	vm_page_grab_guard(void);

extern void		vm_page_release_fictitious(
					vm_page_t page);

extern void		vm_page_more_fictitious(void);

extern int		vm_pool_low(void);

extern vm_page_t	vm_page_grab(void);

extern vm_page_t	vm_page_grablo(void);

extern void		vm_page_release(
					vm_page_t	page);

extern boolean_t	vm_page_wait(
					int		interruptible);

extern vm_page_t	vm_page_alloc(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern vm_page_t	vm_page_alloclo(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern vm_page_t	vm_page_alloc_guard(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern void		vm_page_init(
					vm_page_t	page,
					ppnum_t		phys_page,
					boolean_t	lopage);

extern void		vm_page_free(
					vm_page_t	page);

extern void		vm_page_free_unlocked(
					vm_page_t	page,
					boolean_t	remove_from_hash);

extern void		vm_page_activate(
					vm_page_t	page);

extern void		vm_page_deactivate(
					vm_page_t	page);

extern void		vm_page_deactivate_internal(
					vm_page_t	page,
					boolean_t	clear_hw_reference);

extern void		vm_page_enqueue_cleaned(vm_page_t page);

extern void		vm_page_lru(
					vm_page_t	page);

extern void		vm_page_speculate(
					vm_page_t	page,
					boolean_t	new);

extern void		vm_page_speculate_ageit(
					struct vm_speculative_age_q *aq);

extern void		vm_page_reactivate_all_throttled(void);

extern void		vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks);

extern void		vm_page_rename(
					vm_page_t		page,
					vm_object_t		new_object,
					vm_object_offset_t	new_offset,
					boolean_t		encrypted_ok);

extern void		vm_page_insert(
					vm_page_t		page,
					vm_object_t		object,
					vm_object_offset_t	offset);

extern void		vm_page_insert_internal(
					vm_page_t		page,
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		queues_lock_held,
					boolean_t		insert_in_hash,
					boolean_t		batch_pmap_op);

extern void		vm_page_replace(
					vm_page_t		mem,
					vm_object_t		object,
					vm_object_offset_t	offset);

extern void		vm_page_remove(
					vm_page_t	page,
					boolean_t	remove_from_hash);

extern void		vm_page_zero_fill(
					vm_page_t	page);

extern void		vm_page_part_zero_fill(
					vm_page_t	m,
					vm_offset_t	m_pa,
					vm_size_t	len);

extern void		vm_page_copy(
					vm_page_t	src_page,
					vm_page_t	dest_page);

extern void		vm_page_part_copy(
					vm_page_t	src_m,
					vm_offset_t	src_pa,
					vm_page_t	dst_m,
					vm_offset_t	dst_pa,
					vm_size_t	len);

extern void		vm_page_wire(
					vm_page_t	page);

extern void		vm_page_unwire(
					vm_page_t	page,
					boolean_t	queueit);

extern void		vm_set_page_size(void);

extern void		vm_page_gobble(
					vm_page_t	page);

extern void		vm_page_validate_cs(vm_page_t	page);
extern void		vm_page_validate_cs_mapped(
					vm_page_t	page,
					const void	*kaddr);

extern void		vm_page_free_prepare_queues(
					vm_page_t	page);
extern void		vm_page_free_prepare_object(
					vm_page_t	page,
					boolean_t	remove_from_hash);

#if CONFIG_IOSCHED
extern wait_result_t	vm_page_sleep(
					vm_object_t	object,
					vm_page_t	m,
					int		interruptible);
#endif

extern void		vm_pressure_response(void);

#if CONFIG_JETSAM
extern void memorystatus_pages_update(unsigned int pages_avail);

#define VM_CHECK_MEMORYSTATUS do { \
	memorystatus_pages_update( \
		vm_page_pageable_external_count + \
		vm_page_free_count + \
		(VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) ? 0 : vm_page_purgeable_count) \
		); \
	} while(0)

#else /* CONFIG_JETSAM */


#define VM_CHECK_MEMORYSTATUS	vm_pressure_response()


#endif /* CONFIG_JETSAM */

/*
 *	Functions implemented as macros. m->wanted and m->busy are
 *	protected by the object lock.
 */

#define SET_PAGE_DIRTY(m, set_pmap_modified)			\
		MACRO_BEGIN					\
		vm_page_t __page__ = (m);			\
		__page__->dirty = TRUE;				\
		MACRO_END

#define PAGE_ASSERT_WAIT(m, interruptible)			\
		(((m)->wanted = TRUE),				\
		 assert_wait((event_t) (m), (interruptible)))

#if CONFIG_IOSCHED
#define PAGE_SLEEP(o, m, interruptible)				\
		vm_page_sleep(o, m, interruptible)
#else
#define PAGE_SLEEP(o, m, interruptible)				\
	(((m)->wanted = TRUE),					\
	 thread_sleep_vm_object((o), (m), (interruptible)))
#endif

#define PAGE_WAKEUP_DONE(m)					\
		MACRO_BEGIN					\
		(m)->busy = FALSE;				\
		if ((m)->wanted) {				\
			(m)->wanted = FALSE;			\
			thread_wakeup((event_t) (m));		\
		}						\
		MACRO_END

#define PAGE_WAKEUP(m)						\
		MACRO_BEGIN					\
		if ((m)->wanted) {				\
			(m)->wanted = FALSE;			\
			thread_wakeup((event_t) (m));		\
		}						\
		MACRO_END
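/*
 * Usage sketch (illustrative; real waiters typically go through
 * PAGE_SLEEP or the fault path's wait logic): a thread that needs
 * exclusive access to a page marks itself as a waiter and blocks until
 * the current owner clears 'busy' via PAGE_WAKEUP_DONE.
 */
#if 0
	while (m->busy) {
		PAGE_ASSERT_WAIT(m, THREAD_UNINT);	/* sets m->wanted */
		vm_object_unlock(object);
		thread_block(THREAD_CONTINUE_NULL);
		vm_object_lock(object);
	}
	m->busy = TRUE;			/* we now own the page */
	/* ... operate on the page ... */
	PAGE_WAKEUP_DONE(m);		/* clear busy, wake any waiters */
#endif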
#define VM_PAGE_FREE(p)					\
		MACRO_BEGIN				\
		vm_page_free_unlocked(p, TRUE);		\
		MACRO_END

#define VM_PAGE_GRAB_FICTITIOUS(M)					\
		MACRO_BEGIN						\
		while ((M = vm_page_grab_fictitious()) == VM_PAGE_NULL)	\
			vm_page_more_fictitious();			\
		MACRO_END

#define VM_PAGE_WAIT()		((void)vm_page_wait(THREAD_UNINT))

#define vm_page_queue_lock		(vm_page_locks.vm_page_queue_lock2)
#define vm_page_queue_free_lock		(vm_page_locks.vm_page_queue_free_lock2)

#define vm_page_lock_queues()		lck_mtx_lock(&vm_page_queue_lock)
#define vm_page_unlock_queues()		lck_mtx_unlock(&vm_page_queue_lock)

#define vm_page_lockspin_queues()	lck_mtx_lock_spin(&vm_page_queue_lock)
#define vm_page_trylockspin_queues()	lck_mtx_try_lock_spin(&vm_page_queue_lock)
#define vm_page_lockconvert_queues()	lck_mtx_convert_spin(&vm_page_queue_lock)

#ifdef	VPL_LOCK_SPIN
#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_spin_lock(vpl)
#define VPL_UNLOCK(vpl) lck_spin_unlock(vpl)
#else
#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init_ext(&vlq->vpl_lock, &vlq->vpl_lock_ext, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl)
#define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl)
#endif

#if MACH_ASSERT
extern void vm_page_queues_assert(vm_page_t mem, int val);
#define VM_PAGE_QUEUES_ASSERT(mem, val)	vm_page_queues_assert((mem), (val))
#else
#define VM_PAGE_QUEUES_ASSERT(mem, val)
#endif


/*
 * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
 * local queues if they exist... it's the only spot in the system where we add pages
 * to those queues...  once on those queues, those pages can only move to one of the
 * global page queues or the free queues... they NEVER move from local q to local q.
 * the 'local' state is stable when VM_PAGE_QUEUES_REMOVE is called since we're behind
 * the global vm_page_queue_lock at this point...  we still need to take the local lock
 * in case this operation is being run on a different CPU than the local queue's identity,
 * but we don't have to worry about the page moving to a global queue or becoming wired
 * while we're grabbing the local lock since those operations would require the global
 * vm_page_queue_lock to be held, and we already own it.
 *
 * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
 * 'wired' and local are ALWAYS mutually exclusive conditions.
 */

#define VM_PAGE_QUEUES_REMOVE(mem)				\
	MACRO_BEGIN						\
	boolean_t	was_pageable;				\
								\
	VM_PAGE_QUEUES_ASSERT(mem, 1);				\
	assert(!mem->pageout_queue);				\
/*								\
 *	if (mem->pageout_queue)					\
 *		NOTE: VM_PAGE_QUEUES_REMOVE does not deal with removing pages from the pageout queue...	\
 *		the caller is responsible for determining if the page is on that queue, and if so, must	\
 *		either first remove it (it needs both the page queues lock and the object lock to do	\
 *		this via vm_pageout_steal_laundry), or avoid the call to VM_PAGE_QUEUES_REMOVE		\
 */								\
	if (mem->local) {					\
		struct vpl	*lq;				\
		assert(mem->object != kernel_object);		\
		assert(mem->object != compressor_object);	\
		assert(!mem->inactive && !mem->speculative);	\
		assert(!mem->active && !mem->throttled);	\
		assert(!mem->clean_queue);			\
		assert(!mem->fictitious);			\
		lq = &vm_page_local_q[mem->local_id].vpl_un.vpl; \
		VPL_LOCK(&lq->vpl_lock);			\
		queue_remove(&lq->vpl_queue,			\
			     mem, vm_page_t, pageq);		\
		mem->local = FALSE;				\
		mem->local_id = 0;				\
		lq->vpl_count--;				\
		if (mem->object->internal) {			\
			lq->vpl_internal_count--;		\
		} else {					\
			lq->vpl_external_count--;		\
		}						\
		VPL_UNLOCK(&lq->vpl_lock);			\
		was_pageable = FALSE;				\
	}							\
								\
	else if (mem->active) {					\
		assert(mem->object != kernel_object);		\
		assert(mem->object != compressor_object);	\
		assert(!mem->inactive && !mem->speculative);	\
		assert(!mem->clean_queue);			\
		assert(!mem->throttled);			\
		assert(!mem->fictitious);			\
		queue_remove(&vm_page_queue_active,		\
			mem, vm_page_t, pageq);			\
		mem->active = FALSE;				\
		vm_page_active_count--;				\
		was_pageable = TRUE;				\
	}							\
								\
	else if (mem->inactive) {				\
		assert(mem->object != kernel_object);		\
		assert(mem->object != compressor_object);	\
		assert(!mem->active && !mem->speculative);	\
		assert(!mem->throttled);			\
		assert(!mem->fictitious);			\
		vm_page_inactive_count--;			\
		if (mem->clean_queue) {				\
			queue_remove(&vm_page_queue_cleaned,	\
				mem, vm_page_t, pageq);		\
			mem->clean_queue = FALSE;		\
			vm_page_cleaned_count--;		\
		} else {					\
			if (mem->object->internal) {		\
				queue_remove(&vm_page_queue_anonymous,	\
					mem, vm_page_t, pageq);	\
				vm_page_anonymous_count--;	\
			} else {				\
				queue_remove(&vm_page_queue_inactive,	\
					mem, vm_page_t, pageq);	\
			}					\
			vm_purgeable_q_advance_all();		\
		}						\
		mem->inactive = FALSE;				\
		was_pageable = TRUE;				\
	}							\
								\
	else if (mem->throttled) {				\
		assert(mem->object != compressor_object);	\
		assert(!mem->active && !mem->inactive);		\
		assert(!mem->speculative);			\
		assert(!mem->fictitious);			\
		queue_remove(&vm_page_queue_throttled,		\
			     mem, vm_page_t, pageq);		\
		mem->throttled = FALSE;				\
		vm_page_throttled_count--;			\
		was_pageable = FALSE;				\
	}							\
								\
	else if (mem->speculative) {				\
		assert(mem->object != compressor_object);	\
		assert(!mem->active && !mem->inactive);		\
		assert(!mem->throttled);			\
		assert(!mem->fictitious);			\
		remque(&mem->pageq);				\
		mem->speculative = FALSE;			\
		vm_page_speculative_count--;			\
		was_pageable = TRUE;				\
	}							\
								\
	else if (mem->pageq.next || mem->pageq.prev) {		\
		was_pageable = FALSE;				\
		panic("VM_PAGE_QUEUES_REMOVE: unmarked page on Q"); \
	} else {						\
		was_pageable = FALSE;				\
	}							\
								\
	mem->pageq.next = NULL;					\
	mem->pageq.prev = NULL;					\
	VM_PAGE_QUEUES_ASSERT(mem, 0);				\
	if (was_pageable) {					\
		if (mem->object->internal) {			\
			vm_page_pageable_internal_count--;	\
		} else {					\
			vm_page_pageable_external_count--;	\
		}						\
	}							\
	MACRO_END
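/*
 * Call-sequence sketch (illustrative): per the comment above, the
 * global page queues lock must already be held; the macro then takes
 * the per-CPU local-queue lock itself if the page is on a local queue.
 */
#if 0
	vm_page_lockspin_queues();
	VM_PAGE_QUEUES_REMOVE(mem);	/* page now on no pageable queue */
	vm_page_unlock_queues();
#endif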
#define VM_PAGE_ENQUEUE_INACTIVE(mem, first)			\
	MACRO_BEGIN						\
	VM_PAGE_QUEUES_ASSERT(mem, 0);				\
	assert(!mem->fictitious);				\
	assert(!mem->laundry);					\
	assert(!mem->pageout_queue);				\
	if (mem->object->internal) {				\
		if (first == TRUE)				\
			queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, pageq);	\
		else						\
			queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, pageq);		\
		vm_page_anonymous_count++;			\
		vm_page_pageable_internal_count++;		\
	} else {						\
		if (first == TRUE)				\
			queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, pageq);	\
		else						\
			queue_enter(&vm_page_queue_inactive, mem, vm_page_t, pageq);		\
		vm_page_pageable_external_count++;		\
	}							\
	mem->inactive = TRUE;					\
	vm_page_inactive_count++;				\
	token_new_pagecount++;					\
	MACRO_END


#if DEVELOPMENT || DEBUG
#define VM_PAGE_SPECULATIVE_USED_ADD()				\
	MACRO_BEGIN						\
	OSAddAtomic(1, &vm_page_speculative_used);		\
	MACRO_END
#else
#define VM_PAGE_SPECULATIVE_USED_ADD()
#endif


#define VM_PAGE_CONSUME_CLUSTERED(mem)				\
	MACRO_BEGIN						\
	pmap_lock_phys_page(mem->phys_page);			\
	if (mem->clustered) {					\
		assert(mem->object);				\
		mem->object->pages_used++;			\
		mem->clustered = FALSE;				\
		VM_PAGE_SPECULATIVE_USED_ADD();			\
	}							\
	pmap_unlock_phys_page(mem->phys_page);			\
	MACRO_END


#define VM_PAGE_COUNT_AS_PAGEIN(mem)				\
	MACRO_BEGIN						\
	DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL);		\
	current_task()->pageins++;				\
	if (mem->object->internal) {				\
		DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL); \
	} else {						\
		DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL);	\
	}							\
	MACRO_END


#define DW_vm_page_unwire		0x01
#define DW_vm_page_wire			0x02
#define DW_vm_page_free			0x04
#define DW_vm_page_activate		0x08
#define DW_vm_page_deactivate_internal	0x10
#define DW_vm_page_speculate		0x20
#define DW_vm_page_lru			0x40
#define DW_vm_pageout_throttle_up	0x80
#define DW_PAGE_WAKEUP			0x100
#define DW_clear_busy			0x200
#define DW_clear_reference		0x400
#define DW_set_reference		0x800
#define DW_move_page			0x1000
#define DW_VM_PAGE_QUEUES_REMOVE	0x2000
#define DW_enqueue_cleaned		0x4000
#define DW_vm_phantom_cache_update	0x8000

struct vm_page_delayed_work {
	vm_page_t	dw_m;
	int		dw_mask;
};

void vm_page_do_delayed_work(vm_object_t object, struct vm_page_delayed_work *dwp, int dw_count);

extern unsigned int vm_max_delayed_work_limit;

#define DEFAULT_DELAYED_WORK_LIMIT	32

#define DELAYED_WORK_LIMIT(max)	((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit))

/*
 * vm_page_do_delayed_work may need to drop the object lock...
 * if it does, we need the pages it's looking at to
 * be held stable via the busy bit, so if busy isn't already
 * set, we need to set it and ask vm_page_do_delayed_work
 * to clear it and wakeup anyone that might have blocked on
 * it once we're done processing the page.
 */

#define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt)		\
	MACRO_BEGIN						\
	if (mem->busy == FALSE) {				\
		mem->busy = TRUE;				\
		if ( !(dwp->dw_mask & DW_vm_page_free))		\
			dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \
	}							\
	dwp->dw_m = mem;					\
	dwp++;							\
	dw_cnt++;						\
	MACRO_END
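/*
 * Batching sketch (illustrative, modeled on how the pageout path uses
 * this interface; the local names 'dw_array', 'dwp', 'mem' and
 * 'dw_count' are the caller's own): queue operations are accumulated
 * per page under the object lock, then applied in one shot by
 * vm_page_do_delayed_work once the batch fills up.
 */
#if 0
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp = &dw_array[0];
	int				dw_count = 0;
	int				dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

	/* for each page of interest, while holding the object lock: */
	dwp->dw_mask = DW_vm_page_activate | DW_PAGE_WAKEUP;
	VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_count);

	if (dw_count >= dw_limit) {
		vm_page_do_delayed_work(object, &dw_array[0], dw_count);
		dwp = &dw_array[0];
		dw_count = 0;
	}
#endif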
extern vm_page_t vm_object_page_grab(vm_object_t);

#if VM_PAGE_BUCKETS_CHECK
extern void vm_page_buckets_check(void);
#endif /* VM_PAGE_BUCKETS_CHECK */

#endif	/* _VM_VM_PAGE_H_ */