/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_page.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Resident memory system definitions.
 */

#ifndef	_VM_VM_PAGE_H_
#define	_VM_VM_PAGE_H_

#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <vm/vm_object.h>
#include <kern/queue.h>
#include <kern/lock.h>

#include <kern/macro_help.h>
#include <libkern/OSAtomic.h>


/*
 * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q
 * represents a set of aging bins that are 'protected'...
 *
 * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
 * not yet been 'claimed' but have been aged out of the protective bins;
 * this occurs in vm_page_speculate when it advances to the next bin
 * and discovers that it is still occupied... at that point, all of the
 * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q.  the pages
 * in that bin are all guaranteed to have reached at least the maximum age
 * we allow for a protected page... they can be older if there is no
 * memory pressure to pull them from the bin, or there are no new speculative
 * pages being generated to push them out.
 * this list is the one that vm_pageout_scan will prefer when looking
 * for pages to move to the underweight free list
 *
 * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
 * defines the amount of time a speculative page is normally
 * allowed to live in the 'protected' state (i.e. not available
 * to be stolen if vm_pageout_scan is running and looking for
 * pages)... however, if the total number of speculative pages
 * in the protected state exceeds our limit (defined in vm_pageout.c)
 * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
 * vm_pageout_scan is allowed to steal pages from the protected
 * bucket even if they are underage.
 *
 * vm_pageout_scan is also allowed to pull pages from a protected
 * bin if the bin has reached the "age of consent" we've set
 */
#define VM_PAGE_MAX_SPECULATIVE_AGE_Q	10
#define VM_PAGE_MIN_SPECULATIVE_AGE_Q	1
#define VM_PAGE_SPECULATIVE_AGED_Q	0

#define VM_PAGE_SPECULATIVE_Q_AGE_MS	500
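
/*
 * A minimal sketch (not part of the original header) of what the two
 * tunables above imply: a speculative page normally stays 'protected' for
 * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS milliseconds,
 * i.e. 10 * 500ms = 5 seconds with the defaults, and the protected bins are
 * advanced as a ring whose index wraps within
 * [VM_PAGE_MIN_SPECULATIVE_AGE_Q, VM_PAGE_MAX_SPECULATIVE_AGE_Q].
 * The helpers below are illustrative only and do not exist in xnu.
 */
#if 0	/* example only */
static unsigned int
example_speculative_protected_ms(void)
{
	/* nominal protected lifetime: 10 bins * 500 ms = 5000 ms */
	return VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS;
}

static int
example_speculative_next_bin(int age_index)
{
	/* advance to the next protected bin, wrapping past the maximum */
	if (++age_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
		age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
	return age_index;
}
#endif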

struct vm_speculative_age_q {
	/*
	 * memory queue for speculative pages via clustered pageins
	 */
	queue_head_t	age_q;
	mach_timespec_t	age_ts;
};


extern
struct vm_speculative_age_q	vm_page_queue_speculative[];

extern int			speculative_steal_index;
extern int			speculative_age_index;
extern unsigned int		vm_page_speculative_q_age_ms;


/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).  [Some fields require that both locks be held to
 *	change that field; holding either lock is sufficient to read.]
 */

struct vm_page {
	queue_chain_t	pageq;		/* queue info for FIFO */
					/* queue or free list (P) */

	queue_chain_t	listq;		/* all pages in same object (O) */
	struct vm_page	*next;		/* VP bucket link (O) */

	vm_object_t	object;		/* which object am I in (O&P) */
	vm_object_offset_t offset;	/* offset into that object (O,P) */

	/*
	 * The following word of flags is protected
	 * by the "page queues" lock.
	 *
	 * we use the 'wire_count' field to store the local
	 * queue id if local queues are enabled...
	 * see the comments at 'VM_PAGE_QUEUES_REMOVE' as to
	 * why this is safe to do
	 */
#define local_id wire_count
	unsigned int	wire_count:16,	/* how many wired down maps use me? (O&P) */
	/* boolean_t */	active:1,	/* page is in active list (P) */
			inactive:1,	/* page is in inactive list (P) */
			clean_queue:1,	/* page is in pre-cleaned list (P) */
			local:1,	/* page is in one of the local queues (P) */
			speculative:1,	/* page is in speculative list (P) */
			throttled:1,	/* pager is not responding (P) */
			free:1,		/* page is on free list (P) */
			pageout_queue:1,/* page is on queue for pageout (P) */
			laundry:1,	/* page is being cleaned now (P) */
			reference:1,	/* page has been used (P) */
			gobbled:1,	/* page used internally (P) */
			private:1,	/* Page should not be returned to
					 * the free list (P) */
			no_cache:1,	/* page is not to be cached and should
					 * be reused ahead of other pages (P) */
			__unused_pageq_bits:3;	/* 3 bits available here */

	ppnum_t		phys_page;	/* Physical address of page, passed
					 * to pmap_enter (read-only) */

	/*
	 * The following word of flags is protected
	 * by the "VM object" lock.
	 */
	unsigned int
	/* boolean_t */	busy:1,		/* page is in transit (O) */
			wanted:1,	/* someone is waiting for page (O) */
			tabled:1,	/* page is in VP table (O) */
			fictitious:1,	/* Physical page doesn't exist (O) */
	/*
	 * IMPORTANT: the "pmapped" bit can be turned on while holding the
	 * VM object "shared" lock.  See vm_fault_enter().
	 * This is OK as long as it's the only bit in this bit field that
	 * can be updated without holding the VM object "exclusive" lock.
	 */
			pmapped:1,	/* page has been entered at some
					 * point into a pmap (O **shared**) */
			wpmapped:1,	/* page has been entered at some
					 * point into a pmap for write (O) */
			pageout:1,	/* page wired & busy for pageout (O) */
			absent:1,	/* Data has been requested, but is
					 * not yet available (O) */
			error:1,	/* Data manager was unable to provide
					 * data due to error (O) */
			dirty:1,	/* Page must be cleaned (O) */
			cleaning:1,	/* Page clean has begun (O) */
			precious:1,	/* Page is precious; data must be
					 * returned even if clean (O) */
			clustered:1,	/* page is not the faulted page (O) */
			overwriting:1,	/* Request to unlock has been made
					 * without having data. (O)
					 * [See vm_fault_page_overwrite] */
			restart:1,	/* Page was pushed higher in shadow
					   chain by copy_call-related pagers;
					   start again at top of chain */
			unusual:1,	/* Page is absent, error, restart or
					   page locked */
			encrypted:1,	/* encrypted for secure swap (O) */
			encrypted_cleaning:1,	/* encrypting page */
			cs_validated:1,	/* code-signing: page was checked */
			cs_tainted:1,	/* code-signing: page is tainted */
			reusable:1,
			lopage:1,
			slid:1,
			was_dirty:1,	/* was this page previously dirty? */
			__unused_object_bits:8;	/* 8 bits available here */

#if __LP64__
	unsigned int	__unused_padding;	/* Pad structure explicitly
						 * to 8-byte multiple for LP64 */
#endif
};
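
/*
 * An illustrative sketch (assumed, not from the original header) of the
 * locking convention documented above: (P) fields are manipulated under
 * the page queues lock, (O) fields under the owning object's lock, and a
 * field marked (O&P) or (O,P) needs both locks to change but either to
 * read.  The function below does not exist in xnu; it only demonstrates
 * the discipline using locks declared elsewhere (vm_object_lock() from
 * vm/vm_object.h, vm_page_lock_queues() defined later in this header).
 * The real code typically sets dirty via SET_PAGE_DIRTY(), also below.
 */
#if 0	/* example only */
static void
example_mark_dirty_and_referenced(vm_object_t object, struct vm_page *m)
{
	vm_object_lock(object);		/* (O) fields: dirty, busy, ... */
	m->dirty = TRUE;

	vm_page_lock_queues();		/* (P) fields: active, reference, ... */
	m->reference = TRUE;
	vm_page_unlock_queues();

	vm_object_unlock(object);
}
#endif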

#define DEBUG_ENCRYPTED_SWAP 1
#if DEBUG_ENCRYPTED_SWAP
#define ASSERT_PAGE_DECRYPTED(page)					\
	MACRO_BEGIN							\
	if ((page)->encrypted) {					\
		panic("VM page %p should not be encrypted here\n",	\
		      (page));						\
	}								\
	MACRO_END
#else	/* DEBUG_ENCRYPTED_SWAP */
#define ASSERT_PAGE_DECRYPTED(page)	assert(!(page)->encrypted)
#endif	/* DEBUG_ENCRYPTED_SWAP */

typedef struct vm_page	*vm_page_t;


typedef struct vm_locks_array {
	char		pad __attribute__ ((aligned (64)));
	lck_mtx_t	vm_page_queue_lock2 __attribute__ ((aligned (64)));
	lck_mtx_t	vm_page_queue_free_lock2 __attribute__ ((aligned (64)));
	char		pad2 __attribute__ ((aligned (64)));
} vm_locks_array_t;


#define VM_PAGE_WIRED(m)	((!(m)->local && (m)->wire_count))
#define VM_PAGE_NULL		((vm_page_t) 0)
#define NEXT_PAGE(m)		((vm_page_t) (m)->pageq.next)
#define NEXT_PAGE_PTR(m)	((vm_page_t *) &(m)->pageq.next)

/*
 * XXX	The unusual bit should not be necessary.  Most of the bit
 * XXX	fields above really want to be masks.
 */

/*
 *	For debugging, this macro can be defined to perform
 *	some useful check on a page structure.
 */

#define VM_PAGE_CHECK(mem)			\
	MACRO_BEGIN				\
	VM_PAGE_QUEUES_ASSERT(mem, 1);		\
	MACRO_END

/*	Page coloring:
 *
 *	The free page list is actually n lists, one per color,
 *	where the number of colors is a function of the machine's
 *	cache geometry set at system initialization.  To disable
 *	coloring, set vm_colors to 1 and vm_color_mask to 0.
 *	The boot-arg "colors" may be used to override vm_colors.
 *	Note that there is little harm in having more colors than needed.
 */

#define MAX_COLORS	128
#define DEFAULT_COLORS	32

extern
unsigned int	vm_colors;		/* must be in range 1..MAX_COLORS */
extern
unsigned int	vm_color_mask;		/* must be (vm_colors-1) */
extern
unsigned int	vm_cache_geometry_colors; /* optimal #colors based on cache geometry */
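
/*
 * An illustrative sketch (assumed; not part of this header) of how the
 * color machinery above is typically used: because vm_colors is a power
 * of two and vm_color_mask is (vm_colors - 1), a page's color is just the
 * low bits of its physical page number, which selects one of the
 * per-color free queues declared below as vm_page_queue_free[].
 */
#if 0	/* example only */
static queue_head_t *
example_free_queue_for_page(vm_page_t mem)
{
	unsigned int	color;

	/* low bits of the physical page number select the color */
	color = mem->phys_page & vm_color_mask;
	return &vm_page_queue_free[color];
}
#endif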

/*
 * Wired memory is a very limited resource and we can't let users exhaust it
 * and deadlock the entire system.  We enforce the following limits:
 *
 * vm_user_wire_limit (default: all memory minus vm_global_no_user_wire_amount)
 *	how much memory can be user-wired in one user task
 *
 * vm_global_user_wire_limit (default: same as vm_user_wire_limit)
 *	how much memory can be user-wired in all user tasks
 *
 * vm_global_no_user_wire_amount (default: VM_NOT_USER_WIREABLE)
 *	how much memory must remain user-unwired at any time
 */
#define VM_NOT_USER_WIREABLE (64*1024*1024)	/* 64MB */
extern
vm_map_size_t	vm_user_wire_limit;
extern
vm_map_size_t	vm_global_user_wire_limit;
extern
vm_map_size_t	vm_global_no_user_wire_amount;
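
/*
 * A hedged sketch of the defaults described above (illustrative only;
 * the real initialization lives elsewhere in the VM code, and 'max_mem',
 * the kernel's global for total physical memory in bytes, is assumed
 * here rather than declared in this header):
 */
#if 0	/* example only */
static void
example_init_wire_limits(void)
{
	vm_global_no_user_wire_amount = VM_NOT_USER_WIREABLE;	/* keep 64MB unwired */
	vm_user_wire_limit = (vm_map_size_t)
	    (max_mem - vm_global_no_user_wire_amount);		/* per-task cap */
	vm_global_user_wire_limit = vm_user_wire_limit;		/* system-wide cap */
}
#endif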

/*
 *	Each pageable resident page falls into one of three lists:
 *
 *	free
 *		Available for allocation now.  The free list is
 *		actually an array of lists, one per color.
 *	inactive
 *		Not referenced in any map, but still has an
 *		object/offset-page mapping, and may be dirty.
 *		This is the list of pages that should be
 *		paged out next.  There are actually two
 *		inactive lists, one for pages brought in from
 *		disk or other backing store, and another
 *		for "zero-filled" pages.  See vm_pageout_scan()
 *		for the distinction and usage.
 *	active
 *		A list of pages which have been placed in
 *		at least one physical map.  This list is
 *		ordered, in LRU-like fashion.
 */


#define VPL_LOCK_SPIN 1

struct vpl {
	unsigned int	vpl_count;
	queue_head_t	vpl_queue;
#ifdef	VPL_LOCK_SPIN
	lck_spin_t	vpl_lock;
#else
	lck_mtx_t	vpl_lock;
	lck_mtx_ext_t	vpl_lock_ext;
#endif
};

struct vplq {
	union {
		char		cache_line_pad[128];
		struct vpl	vpl;
	} vpl_un;
};
extern
unsigned int	vm_page_local_q_count;
extern
struct vplq	*vm_page_local_q;
extern
unsigned int	vm_page_local_q_soft_limit;
extern
unsigned int	vm_page_local_q_hard_limit;
extern
vm_locks_array_t vm_page_locks;

extern
queue_head_t	vm_page_queue_free[MAX_COLORS];	/* memory free queue */
extern
queue_head_t	vm_lopage_queue_free;	/* low memory free queue */
extern
queue_head_t	vm_page_queue_active;	/* active memory queue */
extern
queue_head_t	vm_page_queue_inactive;	/* inactive memory queue for normal pages */
extern
queue_head_t	vm_page_queue_cleaned;	/* clean-queue inactive memory */
extern
queue_head_t	vm_page_queue_anonymous;	/* inactive memory queue for anonymous pages */
extern
queue_head_t	vm_page_queue_throttled;	/* memory queue for throttled pageout pages */

extern
vm_offset_t	first_phys_addr;	/* physical address for first_page */
extern
vm_offset_t	last_phys_addr;		/* physical address for last_page */

extern
unsigned int	vm_page_free_count;	/* How many pages are free? (sum of all colors) */
extern
unsigned int	vm_page_fictitious_count;	/* How many fictitious pages are free? */
extern
unsigned int	vm_page_active_count;	/* How many pages are active? */
extern
unsigned int	vm_page_inactive_count;	/* How many pages are inactive? */
extern
unsigned int	vm_page_cleaned_count;	/* How many pages are in the clean queue? */
extern
unsigned int	vm_page_throttled_count;	/* How many inactives are throttled */
extern
unsigned int	vm_page_speculative_count;	/* How many speculative pages are unclaimed? */
extern
unsigned int	vm_page_wire_count;	/* How many pages are wired? */
extern
unsigned int	vm_page_wire_count_initial;	/* How many pages wired at startup */
extern
unsigned int	vm_page_free_target;	/* How many do we want free? */
extern
unsigned int	vm_page_free_min;	/* When to wakeup pageout */
extern
unsigned int	vm_page_throttle_limit;	/* When to throttle new page creation */
extern
uint32_t	vm_page_creation_throttle;	/* When to throttle new page creation */
extern
unsigned int	vm_page_inactive_target;	/* How many do we want inactive? */
extern
unsigned int	vm_page_anonymous_min;	/* When it's ok to pre-clean */
extern
unsigned int	vm_page_inactive_min;	/* When to wakeup pageout */
extern
unsigned int	vm_page_free_reserved;	/* How many pages reserved to do pageout */
extern
unsigned int	vm_page_throttle_count;	/* Count of page allocations throttled */
extern
unsigned int	vm_page_gobble_count;

#if DEVELOPMENT || DEBUG
extern
unsigned int	vm_page_speculative_used;
#endif

extern
unsigned int	vm_page_purgeable_count;	/* How many pages are purgeable now? */
extern
unsigned int	vm_page_purgeable_wired_count;	/* How many purgeable pages are wired now? */
extern
uint64_t	vm_page_purged_count;	/* How many pages got purged so far? */

extern unsigned int	vm_page_free_wanted;
				/* how many threads are waiting for memory */

extern unsigned int	vm_page_free_wanted_privileged;
				/* how many VM privileged threads are waiting for memory */

extern ppnum_t	vm_page_fictitious_addr;
				/* (fake) phys_addr of fictitious pages */

extern ppnum_t	vm_page_guard_addr;
				/* (fake) phys_addr of guard pages */


extern boolean_t	vm_page_deactivate_hint;

/*
 *	0 = all pages available (default)
 *	1 = disable high mem (cap max pages to 4G)
 *	2 = prefer high mem
 */
extern int		vm_himemory_mode;

extern boolean_t	vm_lopage_needed;
extern uint32_t		vm_lopage_free_count;
extern uint32_t		vm_lopage_free_limit;
extern uint32_t		vm_lopage_lowater;
extern boolean_t	vm_lopage_refill;
extern uint64_t		max_valid_dma_address;
extern ppnum_t		max_valid_low_ppnum;
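
/*
 * A hedged sketch of how the free/inactive thresholds above fit together
 * (the actual check lives in the page allocator in vm_page.c, not in this
 * header): after handing out a page, the allocator wakes the pageout
 * daemon if the free pool has dipped below its targets.
 */
#if 0	/* example only */
static void
example_check_free_thresholds(void)
{
	if ((vm_page_free_count < vm_page_free_min) ||
	    ((vm_page_free_count < vm_page_free_target) &&
	     ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_min)))
		thread_wakeup((event_t) &vm_page_free_wanted);
}
#endif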

/*
 * Prototypes for functions exported by this module.
 */
extern void		vm_page_bootstrap(
					vm_offset_t	*startp,
					vm_offset_t	*endp) __attribute__((section("__TEXT, initcode")));

extern void		vm_page_module_init(void) __attribute__((section("__TEXT, initcode")));

extern void		vm_page_init_local_q(void);

extern void		vm_page_create(
					ppnum_t		start,
					ppnum_t		end);

extern vm_page_t	vm_page_lookup(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern vm_page_t	vm_page_grab_fictitious(void);

extern vm_page_t	vm_page_grab_guard(void);

extern void		vm_page_release_fictitious(
					vm_page_t page);

extern void		vm_page_more_fictitious(void);

extern int		vm_pool_low(void);

extern vm_page_t	vm_page_grab(void);

extern vm_page_t	vm_page_grablo(void);

extern void		vm_page_release(
					vm_page_t	page);

extern boolean_t	vm_page_wait(
					int		interruptible);

extern vm_page_t	vm_page_alloc(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern vm_page_t	vm_page_alloclo(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern vm_page_t	vm_page_alloc_guard(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern void		vm_page_init(
					vm_page_t	page,
					ppnum_t		phys_page,
					boolean_t	lopage);

extern void		vm_page_free(
					vm_page_t	page);

extern void		vm_page_free_unlocked(
					vm_page_t	page,
					boolean_t	remove_from_hash);

extern void		vm_page_activate(
					vm_page_t	page);

extern void		vm_page_deactivate(
					vm_page_t	page);

extern void		vm_page_deactivate_internal(
					vm_page_t	page,
					boolean_t	clear_hw_reference);

extern void		vm_page_enqueue_cleaned(vm_page_t page);

extern void		vm_page_lru(
					vm_page_t	page);

extern void		vm_page_speculate(
					vm_page_t	page,
					boolean_t	new);

extern void		vm_page_speculate_ageit(
					struct vm_speculative_age_q *aq);

extern void		vm_page_reactivate_all_throttled(void);

extern void		vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks);

extern void		vm_page_rename(
					vm_page_t		page,
					vm_object_t		new_object,
					vm_object_offset_t	new_offset,
					boolean_t		encrypted_ok);

extern void		vm_page_insert(
					vm_page_t		page,
					vm_object_t		object,
					vm_object_offset_t	offset);

extern void		vm_page_insert_internal(
					vm_page_t		page,
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		queues_lock_held,
					boolean_t		insert_in_hash,
					boolean_t		batch_pmap_op);

extern void		vm_page_replace(
					vm_page_t		mem,
					vm_object_t		object,
					vm_object_offset_t	offset);

extern void		vm_page_remove(
					vm_page_t	page,
					boolean_t	remove_from_hash);

extern void		vm_page_zero_fill(
					vm_page_t	page);

extern void		vm_page_part_zero_fill(
					vm_page_t	m,
					vm_offset_t	m_pa,
					vm_size_t	len);

extern void		vm_page_copy(
					vm_page_t	src_page,
					vm_page_t	dest_page);

extern void		vm_page_part_copy(
					vm_page_t	src_m,
					vm_offset_t	src_pa,
					vm_page_t	dst_m,
					vm_offset_t	dst_pa,
					vm_size_t	len);

extern void		vm_page_wire(
					vm_page_t	page);

extern void		vm_page_unwire(
					vm_page_t	page,
					boolean_t	queueit);

extern void		vm_set_page_size(void);

extern void		vm_page_gobble(
					vm_page_t	page);

extern void		vm_page_validate_cs(vm_page_t page);
extern void		vm_page_validate_cs_mapped(
					vm_page_t	page,
					const void	*kaddr);

extern void		vm_page_free_prepare_queues(
					vm_page_t	page);

extern void		vm_page_free_prepare_object(
					vm_page_t	page,
					boolean_t	remove_from_hash);

#if CONFIG_JETSAM
extern void memorystatus_update(unsigned int pages_avail);

#define VM_CHECK_MEMORYSTATUS do { \
	memorystatus_update(		\
		vm_page_active_count +		\
		vm_page_inactive_count +	\
		vm_page_speculative_count +	\
		vm_page_free_count +		\
		(VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) ? 0 : vm_page_purgeable_count) \
		); \
	} while(0)
#else
#define VM_CHECK_MEMORYSTATUS do {} while(0)
#endif
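
/*
 * A hedged usage sketch for the allocation interfaces above (assumed, not
 * from the original header): vm_page_alloc() is called with the object
 * locked, and on failure callers typically drop the lock and block in
 * vm_page_wait() until the pageout daemon replenishes the free pool, then
 * retry.  This helper is illustrative only.
 */
#if 0	/* example only */
static vm_page_t
example_alloc_page(vm_object_t object, vm_object_offset_t offset)
{
	vm_page_t	m;

	vm_object_lock(object);
	while ((m = vm_page_alloc(object, offset)) == VM_PAGE_NULL) {
		vm_object_unlock(object);
		if (!vm_page_wait(THREAD_UNINT))	/* block for free pages */
			return VM_PAGE_NULL;
		vm_object_lock(object);
	}
	vm_object_unlock(object);
	return m;
}
#endif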
671 */ 672 673#if CONFIG_EMBEDDED 674#define SET_PAGE_DIRTY(m, set_pmap_modified) \ 675 MACRO_BEGIN \ 676 vm_page_t __page__ = (m); \ 677 if (__page__->dirty == FALSE && (set_pmap_modified)) { \ 678 pmap_set_modify(__page__->phys_page); \ 679 } \ 680 __page__->dirty = TRUE; \ 681 MACRO_END 682#else /* CONFIG_EMBEDDED */ 683#define SET_PAGE_DIRTY(m, set_pmap_modified) \ 684 MACRO_BEGIN \ 685 vm_page_t __page__ = (m); \ 686 __page__->dirty = TRUE; \ 687 MACRO_END 688#endif /* CONFIG_EMBEDDED */ 689 690#define PAGE_ASSERT_WAIT(m, interruptible) \ 691 (((m)->wanted = TRUE), \ 692 assert_wait((event_t) (m), (interruptible))) 693 694#define PAGE_SLEEP(o, m, interruptible) \ 695 (((m)->wanted = TRUE), \ 696 thread_sleep_vm_object((o), (m), (interruptible))) 697 698#define PAGE_WAKEUP_DONE(m) \ 699 MACRO_BEGIN \ 700 (m)->busy = FALSE; \ 701 if ((m)->wanted) { \ 702 (m)->wanted = FALSE; \ 703 thread_wakeup((event_t) (m)); \ 704 } \ 705 MACRO_END 706 707#define PAGE_WAKEUP(m) \ 708 MACRO_BEGIN \ 709 if ((m)->wanted) { \ 710 (m)->wanted = FALSE; \ 711 thread_wakeup((event_t) (m)); \ 712 } \ 713 MACRO_END 714 715#define VM_PAGE_FREE(p) \ 716 MACRO_BEGIN \ 717 vm_page_free_unlocked(p, TRUE); \ 718 MACRO_END 719 720#define VM_PAGE_GRAB_FICTITIOUS(M) \ 721 MACRO_BEGIN \ 722 while ((M = vm_page_grab_fictitious()) == VM_PAGE_NULL) \ 723 vm_page_more_fictitious(); \ 724 MACRO_END 725 726#define VM_PAGE_WAIT() ((void)vm_page_wait(THREAD_UNINT)) 727 728#define vm_page_queue_lock (vm_page_locks.vm_page_queue_lock2) 729#define vm_page_queue_free_lock (vm_page_locks.vm_page_queue_free_lock2) 730 731#define vm_page_lock_queues() lck_mtx_lock(&vm_page_queue_lock) 732#define vm_page_unlock_queues() lck_mtx_unlock(&vm_page_queue_lock) 733 734#define vm_page_lockspin_queues() lck_mtx_lock_spin(&vm_page_queue_lock) 735#define vm_page_trylockspin_queues() lck_mtx_try_lock_spin(&vm_page_queue_lock) 736#define vm_page_lockconvert_queues() lck_mtx_convert_spin(&vm_page_queue_lock) 737 738#ifdef VPL_LOCK_SPIN 739#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr) 740#define VPL_LOCK(vpl) lck_spin_lock(vpl) 741#define VPL_UNLOCK(vpl) lck_spin_unlock(vpl) 742#else 743#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init_ext(&vlq->vpl_lock, &vlq->vpl_lock_ext, vpl_grp, vpl_attr) 744#define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl) 745#define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl) 746#endif 747 748#if MACH_ASSERT 749extern void vm_page_queues_assert(vm_page_t mem, int val); 750#define VM_PAGE_QUEUES_ASSERT(mem, val) vm_page_queues_assert((mem), (val)) 751#else 752#define VM_PAGE_QUEUES_ASSERT(mem, val) 753#endif 754 755 756/* 757 * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the 758 * local queues if they exist... its the only spot in the system where we add pages 759 * to those queues... once on those queues, those pages can only move to one of the 760 * global page queues or the free queues... they NEVER move from local q to local q. 761 * the 'local' state is stable when VM_PAGE_QUEUES_REMOVE is called since we're behind 762 * the global vm_page_queue_lock at this point... we still need to take the local lock 763 * in case this operation is being run on a different CPU then the local queue's identity, 764 * but we don't have to worry about the page moving to a global queue or becoming wired 765 * while we're grabbing the local lock since those operations would require the global 766 * vm_page_queue_lock to be held, and we already own it. 

/*
 * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
 * local queues if they exist... it's the only spot in the system where we add pages
 * to those queues... once on those queues, those pages can only move to one of the
 * global page queues or the free queues... they NEVER move from local q to local q.
 * the 'local' state is stable when VM_PAGE_QUEUES_REMOVE is called since we're behind
 * the global vm_page_queue_lock at this point... we still need to take the local lock
 * in case this operation is being run on a different CPU than the local queue's identity,
 * but we don't have to worry about the page moving to a global queue or becoming wired
 * while we're grabbing the local lock since those operations would require the global
 * vm_page_queue_lock to be held, and we already own it.
 *
 * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
 * 'wired' and local are ALWAYS mutually exclusive conditions.
 */

#define VM_PAGE_QUEUES_REMOVE(mem)				\
	MACRO_BEGIN						\
	VM_PAGE_QUEUES_ASSERT(mem, 1);				\
	assert(!mem->laundry);					\
/*								\
 *	if (mem->pageout_queue)					\
 *		NOTE: VM_PAGE_QUEUES_REMOVE does not deal with removing pages from the pageout queue...	\
 *		the caller is responsible for determining if the page is on that queue, and if so, must	\
 *		either first remove it (it needs both the page queues lock and the object lock to do	\
 *		this via vm_pageout_steal_laundry), or avoid the call to VM_PAGE_QUEUES_REMOVE		\
 */								\
	if (mem->local) {					\
		struct vpl	*lq;				\
		assert(mem->object != kernel_object);		\
		assert(!mem->inactive && !mem->speculative);	\
		assert(!mem->active && !mem->throttled);	\
		assert(!mem->clean_queue);			\
		assert(!mem->fictitious);			\
		lq = &vm_page_local_q[mem->local_id].vpl_un.vpl;	\
		VPL_LOCK(&lq->vpl_lock);			\
		queue_remove(&lq->vpl_queue,			\
			     mem, vm_page_t, pageq);		\
		mem->local = FALSE;				\
		mem->local_id = 0;				\
		lq->vpl_count--;				\
		VPL_UNLOCK(&lq->vpl_lock);			\
	}							\
								\
	else if (mem->active) {					\
		assert(mem->object != kernel_object);		\
		assert(!mem->inactive && !mem->speculative);	\
		assert(!mem->clean_queue);			\
		assert(!mem->throttled);			\
		assert(!mem->fictitious);			\
		queue_remove(&vm_page_queue_active,		\
			     mem, vm_page_t, pageq);		\
		mem->active = FALSE;				\
		vm_page_active_count--;				\
	}							\
								\
	else if (mem->inactive) {				\
		assert(mem->object != kernel_object);		\
		assert(!mem->active && !mem->speculative);	\
		assert(!mem->throttled);			\
		assert(!mem->fictitious);			\
		vm_page_inactive_count--;			\
		if (mem->clean_queue) {				\
			queue_remove(&vm_page_queue_cleaned,	\
				     mem, vm_page_t, pageq);	\
			mem->clean_queue = FALSE;		\
			vm_page_cleaned_count--;		\
		} else {					\
			if (mem->object->internal) {		\
				queue_remove(&vm_page_queue_anonymous,	\
					     mem, vm_page_t, pageq);	\
				vm_page_anonymous_count--;	\
			} else {				\
				queue_remove(&vm_page_queue_inactive,	\
					     mem, vm_page_t, pageq);	\
			}					\
			vm_purgeable_q_advance_all();		\
		}						\
		mem->inactive = FALSE;				\
	}							\
								\
	else if (mem->throttled) {				\
		assert(!mem->active && !mem->inactive);		\
		assert(!mem->speculative);			\
		assert(!mem->fictitious);			\
		queue_remove(&vm_page_queue_throttled,		\
			     mem, vm_page_t, pageq);		\
		mem->throttled = FALSE;				\
		vm_page_throttled_count--;			\
	}							\
								\
	else if (mem->speculative) {				\
		assert(!mem->active && !mem->inactive);		\
		assert(!mem->throttled);			\
		assert(!mem->fictitious);			\
		remque(&mem->pageq);				\
		mem->speculative = FALSE;			\
		vm_page_speculative_count--;			\
	}							\
								\
	else if (mem->pageq.next || mem->pageq.prev)		\
		panic("VM_PAGE_QUEUES_REMOVE: unmarked page on Q");	\
	mem->pageq.next = NULL;					\
	mem->pageq.prev = NULL;					\
	VM_PAGE_QUEUES_ASSERT(mem, 0);				\
	MACRO_END


#define VM_PAGE_ENQUEUE_INACTIVE(mem, first)			\
	MACRO_BEGIN						\
	VM_PAGE_QUEUES_ASSERT(mem, 0);				\
	assert(!mem->fictitious);				\
	assert(!mem->laundry);					\
	assert(!mem->pageout_queue);				\
	if (mem->object->internal) {				\
		if (first == TRUE)				\
			queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, pageq);	\
		else						\
			queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, pageq);		\
		vm_page_anonymous_count++;			\
	} else {						\
		if (first == TRUE)				\
			queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, pageq);	\
		else						\
			queue_enter(&vm_page_queue_inactive, mem, vm_page_t, pageq);		\
	}							\
	mem->inactive = TRUE;					\
	vm_page_inactive_count++;				\
	token_new_pagecount++;					\
	MACRO_END


#if DEVELOPMENT || DEBUG
#define VM_PAGE_SPECULATIVE_USED_ADD()				\
	MACRO_BEGIN						\
	OSAddAtomic(1, &vm_page_speculative_used);		\
	MACRO_END
#else
#define VM_PAGE_SPECULATIVE_USED_ADD()
#endif


#define VM_PAGE_CONSUME_CLUSTERED(mem)				\
	MACRO_BEGIN						\
	if (mem->clustered) {					\
		assert(mem->object);				\
		mem->object->pages_used++;			\
		mem->clustered = FALSE;				\
		VM_PAGE_SPECULATIVE_USED_ADD();			\
	}							\
	MACRO_END



#define DW_vm_page_unwire		0x01
#define DW_vm_page_wire			0x02
#define DW_vm_page_free			0x04
#define DW_vm_page_activate		0x08
#define DW_vm_page_deactivate_internal	0x10
#define DW_vm_page_speculate		0x20
#define DW_vm_page_lru			0x40
#define DW_vm_pageout_throttle_up	0x80
#define DW_PAGE_WAKEUP			0x100
#define DW_clear_busy			0x200
#define DW_clear_reference		0x400
#define DW_set_reference		0x800
#define DW_move_page			0x1000
#define DW_VM_PAGE_QUEUES_REMOVE	0x2000
#define DW_enqueue_cleaned		0x4000

struct vm_page_delayed_work {
	vm_page_t	dw_m;
	int		dw_mask;
};

void vm_page_do_delayed_work(vm_object_t object, struct vm_page_delayed_work *dwp, int dw_count);

extern unsigned int vm_max_delayed_work_limit;

#define DEFAULT_DELAYED_WORK_LIMIT	32

#define DELAYED_WORK_LIMIT(max)	((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit))

/*
 * vm_page_do_delayed_work may need to drop the object lock...
 * if it does, we need the pages it's looking at to
 * be held stable via the busy bit, so if busy isn't already
 * set, we need to set it and ask vm_page_do_delayed_work
 * to clear it and wakeup anyone that might have blocked on
 * it once we're done processing the page.
 */

#define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt)		\
	MACRO_BEGIN						\
	if (mem->busy == FALSE) {				\
		mem->busy = TRUE;				\
		if ( !(dwp->dw_mask & DW_vm_page_free))		\
			dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \
	}							\
	dwp->dw_m = mem;					\
	dwp++;							\
	dw_cnt++;						\
	MACRO_END
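
/*
 * A hedged sketch of how the delayed-work machinery above is used by its
 * callers (e.g. the pageout code; this loop is illustrative, not the
 * actual xnu code): per-page operations are collected into a dw array
 * and flushed in one batch when the array fills, amortizing the cost of
 * taking the page queues lock.
 */
#if 0	/* example only */
static void
example_deactivate_batch(vm_object_t object, vm_page_t *pages, int npages)
{
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp;
	vm_page_t			m;
	int				dw_count;
	int				dw_limit;
	int				i;

	dwp = &dw_array[0];
	dw_count = 0;
	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

	vm_object_lock(object);
	for (i = 0; i < npages; i++) {
		m = pages[i];

		/* record the operation, then let the macro set busy etc. */
		dwp->dw_mask = DW_vm_page_deactivate_internal;
		VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

		if (dw_count >= dw_limit) {
			/* flush the batch under the page queues lock */
			vm_page_do_delayed_work(object, &dw_array[0], dw_count);
			dwp = &dw_array[0];
			dw_count = 0;
		}
	}
	if (dw_count)
		vm_page_do_delayed_work(object, &dw_array[0], dw_count);
	vm_object_unlock(object);
}
#endif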

extern vm_page_t vm_object_page_grab(vm_object_t);


#endif	/* _VM_VM_PAGE_H_ */