/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Declarations for the pageout daemon interface.
 */

#ifndef	_VM_VM_PAGEOUT_H_
#define	_VM_VM_PAGEOUT_H_

#ifdef	KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/lock.h>

#include <libkern/OSAtomic.h>


#include <vm/vm_options.h>

#ifdef	MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif

#include <sys/kdebug.h>

#define VM_PAGE_AVAILABLE_COUNT()	((unsigned int)(vm_page_cleaned_count))

/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_reactivated, vm_pageout_cleaned_fault_reactivated, vm_pageout_cleaned_commit_reactivated;

#if CONFIG_JETSAM
#define LATENCY_JETSAM	FALSE
#if LATENCY_JETSAM
#define JETSAM_LATENCY_TOKEN_AGE	3000	/* 3ms */
#define NUM_OF_JETSAM_LATENCY_TOKENS	1000

#define JETSAM_AGE_NOTIFY_CRITICAL	1500000	/* 1.5 secs */

extern boolean_t jlp_init;
extern uint64_t jlp_time, jlp_current;
extern unsigned int latency_jetsam_wakeup;
#endif /* LATENCY_JETSAM */
#endif /* CONFIG_JETSAM */

#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_enabled;
#define VM_DYNAMIC_PAGING_ENABLED(port) ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) || (memorystatus_freeze_enabled == FALSE && IP_VALID(port)))
#else
#define VM_DYNAMIC_PAGING_ENABLED(port) (COMPRESSED_PAGER_IS_ACTIVE || IP_VALID(port))
#endif


extern int	vm_debug_events;

/* kdebug event codes traced via VM_DEBUG_EVENT when vm_debug_events is set */
#define VMF_CHECK_ZFDELAY		0x100
#define VMF_COWDELAY			0x101
#define VMF_ZFDELAY			0x102
#define VMF_COMPRESSORDELAY		0x103

#define VM_PAGEOUT_SCAN			0x104
#define VM_PAGEOUT_BALANCE		0x105
#define VM_PAGEOUT_FREELIST		0x106
#define VM_PAGEOUT_PURGEONE		0x107
#define VM_PAGEOUT_CACHE_EVICT		0x108
#define VM_PAGEOUT_THREAD_BLOCK		0x109
#define VM_PAGEOUT_JETSAM		0x10A
#define VM_PAGEOUT_PAGE_TOKEN		0x10B

#define VM_UPL_PAGE_WAIT		0x120
#define VM_IOPL_PAGE_WAIT		0x121
#define VM_PAGE_WAIT_BLOCK		0x122

#define VM_PRESSURE_EVENT		0x130
#define VM_EXECVE			0x131
#define VM_WAKEUP_COMPACTOR_SWAPPER	0x132

#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)	\
	MACRO_BEGIN							\
	if (vm_debug_events) {						\
		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}								\
	MACRO_END

extern void inline memoryshot(unsigned int event, unsigned int control);

extern kern_return_t vm_map_create_upl(
	vm_map_t		map,
	vm_map_address_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	int			*flags);

extern ppnum_t upl_get_highest_page(
	upl_t			upl);

extern upl_size_t upl_get_size(
	upl_t			upl);


#ifndef	MACH_KERNEL_PRIVATE
typedef struct vm_page	*vm_page_t;
#endif

extern void		vm_page_free_list(
				vm_page_t	mem,
				boolean_t	prepare_object);

extern kern_return_t	vm_page_alloc_list(
				int		page_count,
				int		flags,
				vm_page_t	*list);

extern void		vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t		vm_page_get_phys_page(vm_page_t page);
extern vm_page_t	vm_page_get_next(vm_page_t page);

extern kern_return_t	mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);

#ifdef	MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int	vm_pageout_scan_event_counter;
extern unsigned int	vm_page_anonymous_count;


/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
	queue_head_t	pgo_pending;	/* laundry pages to be processed by pager's iothread */
	unsigned int	pgo_laundry;	/* current count of laundry pages on queue or in flight */
	unsigned int	pgo_maxlaundry;
	uint64_t	pgo_tid;	/* thread ID of I/O thread that services this queue */
	uint8_t		pgo_lowpriority; /* iothread is set to use low priority I/O */

	unsigned int	pgo_idle:1,	/* iothread is blocked waiting for work to do */
			pgo_busy:1,     /* iothread is currently processing request from pgo_pending */
			pgo_throttled:1,/* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
			pgo_draining:1,	/* NOTE(review): presumably queue is being drained — confirm */
			pgo_inited:1,	/* NOTE(review): presumably set once queue setup completes — confirm */
			:0;		/* zero-width bit-field: close out the allocation unit */
};

#define VM_PAGE_Q_THROTTLED(q)		\
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

extern struct	vm_pageout_queue	vm_pageout_queue_internal;
extern struct	vm_pageout_queue	vm_pageout_queue_external;


/*
 *	Routines exported to Mach.
 */
extern void		vm_pageout(void);

extern kern_return_t	vm_pageout_internal_start(void);

extern void		vm_pageout_object_terminate(
					vm_object_t	object);

extern void		vm_pageout_cluster(
					vm_page_t	m,
					boolean_t	pageout);

extern void		vm_pageout_initialize_page(
					vm_page_t	m);

extern void		vm_pageclean_setup(
					vm_page_t		m,
					vm_page_t		new_m,
					vm_object_t		new_object,
					vm_object_offset_t	new_offset);

/* UPL exported routines and structures */

#define upl_lock_init(object)		lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object)	lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)		lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)		lck_mtx_unlock(&(object)->Lock)

#define MAX_VECTOR_UPL_ELEMENTS	8

struct _vector_upl_iostates {
	upl_offset_t	offset;
	upl_size_t	size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

struct _vector_upl {
	upl_size_t		size;
	uint32_t		num_upls;
	uint32_t		invalid_upls;
	uint32_t		_reserved;
	vm_map_t		submap;
	vm_offset_t		submap_dst_addr;
	vm_object_offset_t	offset;
	upl_t			upl_elems[MAX_VECTOR_UPL_ELEMENTS];
	upl_page_info_array_t	pagelist;
	vector_upl_iostates_t	upl_iostates[MAX_VECTOR_UPL_ELEMENTS];
};

typedef struct _vector_upl* vector_upl_t;

/* universal page list structure */

#if UPL_DEBUG
#define UPL_DEBUG_STACK_FRAMES	16
#define UPL_DEBUG_COMMIT_RECORDS 4

struct ucd {
	upl_offset_t	c_beg;
	upl_offset_t	c_end;
	int		c_aborted;
	void *		c_retaddr[UPL_DEBUG_STACK_FRAMES];
};
#endif


struct upl {
	decl_lck_mtx_data(,	Lock)	/* Synchronization */
	int		ref_count;
	int		ext_ref_count;
	int		flags;
	vm_object_t	src_object;	/* object derived from */
	vm_object_offset_t offset;
	upl_size_t	size;		/* size in bytes of the address space */
	vm_offset_t	kaddr;		/* secondary mapping in kernel */
	vm_object_t	map_object;
	ppnum_t		highest_page;
	void*		vector_upl;
#if	UPL_DEBUG
	uintptr_t	ubc_alias1;
	uintptr_t	ubc_alias2;
	queue_chain_t	uplq;		/* List of outstanding upls on an obj */

	thread_t	upl_creator;
	uint32_t	upl_state;
	uint32_t	upl_commit_index;
	void	*upl_create_retaddr[UPL_DEBUG_STACK_FRAMES];

	struct	ucd	upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif	/* UPL_DEBUG */
};

/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED	0x1
#define UPL_KERNEL_MAPPED	0x2
#define UPL_CLEAR_DIRTY		0x4
#define UPL_COMPOSITE_LIST	0x8
#define UPL_INTERNAL		0x10
#define UPL_PAGE_SYNC_DONE	0x20
#define UPL_DEVICE_MEMORY	0x40
#define UPL_PAGEOUT		0x80
#define UPL_LITE		0x100
#define UPL_IO_WIRE		0x200
#define UPL_ACCESS_BLOCKED	0x400
#define UPL_ENCRYPTED		0x800
#define UPL_SHADOWED		0x1000
#define UPL_KERNEL_OBJECT	0x2000
#define UPL_VECTOR		0x4000
#define UPL_SET_DIRTY		0x8000
#define UPL_HAS_BUSY		0x10000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL	0
#define UPL_CREATE_INTERNAL	0x1
#define UPL_CREATE_LITE		0x2

extern upl_t vector_upl_create(vm_offset_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*);
extern upl_t vector_upl_subupl_byindex(upl_t , uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t , upl_offset_t*, upl_size_t*);

extern void vm_object_set_pmap_cache_attr(
		vm_object_t		object,
		upl_page_info_array_t	user_page_list,
		unsigned int		num_pages,
		boolean_t		batch_pmap_op);

extern kern_return_t vm_object_iopl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags);

extern kern_return_t vm_object_super_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags);

/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t		map,
	upl_t			upl,
	vm_map_offset_t		*dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t		map,
	upl_t			upl);

/* wired page list structure */
typedef uint32_t *wpl_array_t;

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);

/*
 * ENCRYPTED SWAP:
 */
extern void upl_encrypt(
	upl_t			upl,
	upl_offset_t		crypt_offset,
	upl_size_t		crypt_size);
extern void vm_page_encrypt(
	vm_page_t		page,
	vm_map_offset_t		kernel_map_offset);
extern boolean_t vm_pages_encrypted; /* are there encrypted pages ? */
extern void vm_page_decrypt(
	vm_page_t		page,
	vm_map_offset_t		kernel_map_offset);
extern kern_return_t vm_paging_map_object(
	vm_page_t		page,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_prot_t		protection,
	boolean_t		can_unlock_object,
	vm_map_size_t		*size,		/* IN/OUT */
	vm_map_offset_t		*address,	/* OUT */
	boolean_t		*need_unmap);	/* OUT */
extern void vm_paging_unmap_object(
	vm_object_t		object,
	vm_map_offset_t		start,
	vm_map_offset_t		end);
decl_simple_lock_data(extern, vm_paging_lock)

/*
 * Backing store throttle when BS is exhausted
 */
extern unsigned int	vm_backing_store_low;

extern void vm_pageout_steal_laundry(
	vm_page_t page,
	boolean_t queues_locked);

extern boolean_t vm_page_is_slideable(vm_page_t m);

extern kern_return_t vm_page_slide(vm_page_t page, vm_map_offset_t kernel_mapping_offset);
#endif  /* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t  upl_ubc_alias_set(
	upl_t upl,
	uintptr_t alias1,
	uintptr_t alias2);
extern int  upl_ubc_alias_get(
	upl_t upl,
	uintptr_t * al,
	uintptr_t * al2);
#endif /* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
			boolean_t	suspend);

extern kern_return_t upl_transpose(
	upl_t	upl1,
	upl_t	upl2);

extern kern_return_t mach_vm_pressure_monitor(
	boolean_t	wait_for_pressure,
	unsigned int	nsecs_monitored,
	unsigned int	*pages_reclaimed_p,
	unsigned int	*pages_wanted_p);

extern kern_return_t
vm_set_buffer_cleanup_callout(
	boolean_t	(*func)(int));

struct vm_page_stats_reusable {
	SInt32		reusable_count;
	uint64_t	reusable;
	uint64_t	reused;
	uint64_t	reused_wire;
	uint64_t	reused_remove;
	uint64_t	all_reusable_calls;
	uint64_t	partial_reusable_calls;
	uint64_t	all_reuse_calls;
	uint64_t	partial_reuse_calls;
	uint64_t	reusable_pages_success;
	uint64_t	reusable_pages_failure;
	uint64_t	reusable_pages_shared;
	uint64_t	reuse_pages_success;
	uint64_t	reuse_pages_failure;
	uint64_t	can_reuse_success;
	uint64_t	can_reuse_failure;
	uint64_t	reusable_reclaimed;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

extern int hibernate_flush_memory(void);
extern void hibernate_create_paddr_map(void);

extern int vm_compressor_mode;
extern int vm_compressor_thread_count;

#define VM_PAGER_DEFAULT				0x1	/* Use default pager. */
#define VM_PAGER_COMPRESSOR_NO_SWAP			0x2	/* In-core compressor only. */
#define VM_PAGER_COMPRESSOR_WITH_SWAP			0x4	/* In-core compressor + swap backend. */
#define VM_PAGER_FREEZER_DEFAULT			0x8	/* Freezer backed by default pager.*/
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP		0x10	/* Freezer backed by in-core compressor only i.e. frozen data remain in-core compressed.*/
#define VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP		0x20	/* Freezer backed by in-core compressor with swap support too.*/

#define VM_PAGER_MAX_MODES				6	/* Total number of vm compressor modes supported */

#define DEFAULT_PAGER_IS_ACTIVE		((vm_compressor_mode & VM_PAGER_DEFAULT) == VM_PAGER_DEFAULT)

#define COMPRESSED_PAGER_IS_ACTIVE	(vm_compressor_mode & (VM_PAGER_COMPRESSOR_NO_SWAP | VM_PAGER_COMPRESSOR_WITH_SWAP))

#define DEFAULT_FREEZER_IS_ACTIVE	((vm_compressor_mode & VM_PAGER_FREEZER_DEFAULT) == VM_PAGER_FREEZER_DEFAULT)

#define DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE	(vm_compressor_mode & (VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP | VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP))


#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_VM_PAGEOUT_H_ */