/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef XNU_KERNEL_PRIVATE

#ifndef _VM_VM_PROTOS_H_
#define _VM_VM_PROTOS_H_

#include <mach/mach_types.h>
#include <kern/kern_types.h>

/*
 * This file contains various type definitions and routine prototypes
 * that are needed to avoid compilation warnings for VM code (in osfmk,
 * default_pager and bsd).
 * Most of these should eventually go into more appropriate header files.
 *
 * Include it after all other header files since it doesn't include any
 * type definitions and it works around some conflicts with other header
 * files.
 */

/*
 * iokit
 */
extern kern_return_t device_data_action(
	uintptr_t		device_handle,
	ipc_port_t		device_pager,
	vm_prot_t		protection,
	vm_object_offset_t	offset,
	vm_size_t		size);

extern kern_return_t device_close(
	uintptr_t		device_handle);

/*
 * default_pager
 */
extern int start_def_pager(
	char			*bs_device);
extern int default_pager_init_flag;

/*
 * osfmk
 */
#ifndef _IPC_IPC_PORT_H_
extern mach_port_name_t ipc_port_copyout_send(
	ipc_port_t	sright,
	ipc_space_t	space);
extern task_t port_name_to_task(
	mach_port_name_t name);
#endif /* _IPC_IPC_PORT_H_ */

extern ipc_space_t get_task_ipcspace(
	task_t t);

/* Some loose-ends VM stuff */

extern vm_map_t		kalloc_map;
extern vm_size_t	msg_ool_size_small;
extern vm_map_t		zone_map;

extern void consider_machine_adjust(void);
extern pmap_t get_map_pmap(vm_map_t);
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
extern int get_vmmap_entries(vm_map_t);

int vm_map_page_mask(vm_map_t);

extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);

/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */

extern kern_return_t vm_upl_map(
	vm_map_t	target_task,
	upl_t		upl,
	vm_address_t	*address);

extern kern_return_t vm_upl_unmap(
	vm_map_t	target_task,
	upl_t		upl);

extern kern_return_t vm_region_object_create(
	vm_map_t	target_task,
	vm_size_t	size,
	ipc_port_t	*object_handle);

extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
extern mach_vm_offset_t mach_get_vm_end(vm_map_t);

#if CONFIG_CODE_DECRYPTION
struct pager_crypt_info;
extern kern_return_t vm_map_apple_protected(
	vm_map_t		map,
	vm_map_offset_t		start,
	vm_map_offset_t		end,
	struct pager_crypt_info *crypt_info);
extern void apple_protect_pager_bootstrap(void);
extern memory_object_t apple_protect_pager_setup(vm_object_t backing_object,
						 struct pager_crypt_info *crypt_info);
#endif	/* CONFIG_CODE_DECRYPTION */

struct vnode;
extern void swapfile_pager_bootstrap(void);
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);


/*
 * bsd
 */
struct vnode;
extern void vnode_pager_shutdown(void);
extern void *upl_get_internal_page_list(
	upl_t upl);

typedef int pager_return_t;
extern pager_return_t vnode_pagein(
	struct vnode *, upl_t,
	upl_offset_t, vm_object_offset_t,
	upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
	struct vnode *, upl_t,
	upl_offset_t, vm_object_offset_t,
	upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
	struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
	struct vnode *);
extern uint32_t vnode_pager_isinuse(
	struct vnode *);
extern boolean_t vnode_pager_isSSD(
	struct vnode *);
extern void vnode_pager_throttle(
	void);
extern uint32_t vnode_pager_return_throttle_io_limit(
	struct vnode *,
	uint32_t *);
extern kern_return_t vnode_pager_get_name(
	struct vnode	*vp,
	char		*pathname,
	vm_size_t	pathname_len,
	char		*filename,
	vm_size_t	filename_len,
	boolean_t	*truncated_path_p);
struct timespec;
extern kern_return_t vnode_pager_get_mtime(
	struct vnode	*vp,
	struct timespec	*mtime,
	struct timespec	*cs_mtime);
extern kern_return_t vnode_pager_get_cs_blobs(
	struct vnode	*vp,
	void		**blobs);

#if CHECK_CS_VALIDATION_BITMAP
/* used by the vnode_pager_cs_validation_bitmap routine*/
#define CS_BITMAP_SET	1
#define CS_BITMAP_CLEAR	2
#define CS_BITMAP_CHECK	3

#endif /* CHECK_CS_VALIDATION_BITMAP */

extern void vnode_pager_bootstrap(void);
extern kern_return_t
vnode_pager_data_unlock(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	size,
	vm_prot_t		desired_access);
extern kern_return_t vnode_pager_init(
	memory_object_t,
	memory_object_control_t,
	memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
	memory_object_t,
	memory_object_offset_t *);
extern kern_return_t vnode_pager_get_isinuse(
	memory_object_t,
	uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
	memory_object_t,
	boolean_t *);
extern kern_return_t vnode_pager_get_throttle_io_limit(
	memory_object_t,
	uint32_t *);
extern kern_return_t vnode_pager_get_object_name(
	memory_object_t	mem_obj,
	char		*pathname,
	vm_size_t	pathname_len,
	char		*filename,
	vm_size_t	filename_len,
	boolean_t	*truncated_path_p);
extern kern_return_t vnode_pager_get_object_mtime(
	memory_object_t	mem_obj,
	struct timespec	*mtime,
	struct timespec	*cs_mtime);
extern kern_return_t vnode_pager_get_object_cs_blobs(
	memory_object_t	mem_obj,
	void		**blobs);

#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
	memory_object_t	mem_obj,
	memory_object_offset_t	offset,
	int		optype);
#endif /*CHECK_CS_VALIDATION_BITMAP*/

extern kern_return_t ubc_cs_check_validation_bitmap(
	struct vnode *vp,
	memory_object_offset_t offset,
	int optype);

extern kern_return_t vnode_pager_data_request(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	vm_prot_t,
	memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	memory_object_offset_t *,
	int *,
	boolean_t,
	boolean_t,
	int);
extern kern_return_t vnode_pager_data_initialize(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t);
extern void vnode_pager_reference(
	memory_object_t	mem_obj);
extern kern_return_t vnode_pager_synchronize(
	memory_object_t	mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	vm_sync_t	sync_flags);
extern kern_return_t vnode_pager_map(
	memory_object_t	mem_obj,
	vm_prot_t	prot);
extern kern_return_t vnode_pager_last_unmap(
	memory_object_t	mem_obj);
extern void vnode_pager_deallocate(
	memory_object_t);
extern kern_return_t vnode_pager_terminate(
	memory_object_t);
extern void vnode_pager_vrele(
	struct vnode *vp);
extern void vnode_pager_release_from_cache(
	int	*);
extern int ubc_map(
	struct vnode *vp,
	int flags);
extern void ubc_unmap(
	struct vnode *vp);

struct vm_map_entry;
extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);

extern void dp_memory_object_reference(memory_object_t);
extern void dp_memory_object_deallocate(memory_object_t);
#ifndef _memory_object_server_
extern kern_return_t dp_memory_object_init(memory_object_t,
					   memory_object_control_t,
					   memory_object_cluster_size_t);
extern kern_return_t dp_memory_object_terminate(memory_object_t);
extern kern_return_t dp_memory_object_data_request(memory_object_t,
						   memory_object_offset_t,
						   memory_object_cluster_size_t,
						   vm_prot_t,
						   memory_object_fault_info_t);
extern kern_return_t dp_memory_object_data_return(memory_object_t,
						  memory_object_offset_t,
						  memory_object_cluster_size_t,
						  memory_object_offset_t *,
						  int *,
						  boolean_t,
						  boolean_t,
						  int);
extern kern_return_t dp_memory_object_data_initialize(memory_object_t,
						      memory_object_offset_t,
						      memory_object_cluster_size_t);
extern kern_return_t dp_memory_object_data_unlock(memory_object_t,
						  memory_object_offset_t,
						  memory_object_size_t,
						  vm_prot_t);
extern kern_return_t dp_memory_object_synchronize(memory_object_t,
						  memory_object_offset_t,
						  memory_object_size_t,
						  vm_sync_t);
extern kern_return_t dp_memory_object_map(memory_object_t,
					  vm_prot_t);
extern kern_return_t dp_memory_object_last_unmap(memory_object_t);
#endif /* _memory_object_server_ */
#ifndef _memory_object_default_server_
extern kern_return_t default_pager_memory_object_create(
	memory_object_default_t,
	vm_size_t,
	memory_object_t *);
#endif /* _memory_object_default_server_ */

#if CONFIG_FREEZE
extern unsigned int default_pager_swap_pages_free(void);
struct default_freezer_handle;
struct vm_page;
__private_extern__ void	default_freezer_init(void);
__private_extern__ struct default_freezer_handle* default_freezer_handle_allocate(void);
__private_extern__ kern_return_t
default_freezer_handle_init(
	struct default_freezer_handle *df_handle);
__private_extern__ void
default_freezer_handle_deallocate(
	struct default_freezer_handle *df_handle);
__private_extern__ void
default_freezer_pageout(
	struct default_freezer_handle *df_handle);
__private_extern__ kern_return_t
default_freezer_pack(
	unsigned int	*purgeable_count,
	unsigned int	*wired_count,
	unsigned int	*clean_count,
	unsigned int	*dirty_count,
	unsigned int	dirty_budget,
	boolean_t	*shared,
	vm_object_t	src_object,
	struct default_freezer_handle *df_handle);
__private_extern__ kern_return_t
default_freezer_unpack(
	struct default_freezer_handle *df_handle);
__private_extern__ void
default_freezer_pack_page(
	struct vm_page* p,
	struct default_freezer_handle *df_handle);

#endif /* CONFIG_FREEZE */

extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
				       memory_object_control_t,
				       memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
					       memory_object_offset_t,
					       memory_object_cluster_size_t,
					       vm_prot_t,
					       memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
					      memory_object_offset_t,
					      memory_object_cluster_size_t,
					      memory_object_offset_t *,
					      int *,
					      boolean_t,
					      boolean_t,
					      int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
						  memory_object_offset_t,
						  memory_object_cluster_size_t);
extern kern_return_t device_pager_data_unlock(memory_object_t,
					      memory_object_offset_t,
					      memory_object_size_t,
					      vm_prot_t);
extern kern_return_t device_pager_synchronize(memory_object_t,
					      memory_object_offset_t,
					      memory_object_size_t,
					      vm_sync_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	ppnum_t			page_num,
	vm_size_t		size);
extern memory_object_t device_pager_setup(
	memory_object_t,
	uintptr_t,
	vm_size_t,
	int);
extern void device_pager_bootstrap(void);

extern kern_return_t pager_map_to_phys_contiguous(
	memory_object_control_t	object,
	memory_object_offset_t	offset,
	addr64_t		base_vaddr,
	vm_size_t		size);

extern kern_return_t memory_object_create_named(
	memory_object_t	pager,
	memory_object_offset_t	size,
	memory_object_control_t	*control);

struct macx_triggers_args;
extern int mach_macx_triggers(
	struct macx_triggers_args *args);

extern int macx_swapinfo(
	memory_object_size_t	*total_p,
	memory_object_size_t	*avail_p,
	vm_size_t		*pagesize_p,
	boolean_t		*encrypted_p);

extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(vm_map_t, vm_map_offset_t, vm_map_offset_t);

struct proc;
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr);
extern boolean_t cs_validate_page(void *blobs,
				  memory_object_t pager,
				  memory_object_offset_t offset,
				  const void *data,
				  boolean_t *tainted);

extern kern_return_t mach_memory_entry_purgable_control(
	ipc_port_t	entry_port,
	vm_purgable_t	control,
	int		*state);

extern kern_return_t mach_memory_entry_get_page_counts(
	ipc_port_t	entry_port,
	unsigned int	*resident_page_count,
	unsigned int	*dirty_page_count);

extern kern_return_t mach_memory_entry_page_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags);

extern kern_return_t mach_memory_entry_range_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset_beg,
	vm_object_offset_t	offset_end,
	int			ops,
	int			*range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern void mach_destroy_memory_entry(ipc_port_t port);
extern kern_return_t mach_memory_entry_allocate(
	struct vm_named_entry **user_entry_p,
	ipc_port_t *user_handle_p);

extern void vm_paging_map_init(void);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern void no_paging_space_action(void);

#define VM_TOGGLE_CLEAR		0
#define VM_TOGGLE_SET		1
#define VM_TOGGLE_GETVALUE	999
int vm_toggle_entry_reuse(int, int*);

#define	SWAP_WRITE	0x00000000	/* Write buffer (pseudo flag). */
#define	SWAP_READ	0x00000001	/* Read buffer. */
#define	SWAP_ASYNC	0x00000002	/* Start I/O, do not wait. */

extern void vm_compressor_pager_init(void);
extern kern_return_t compressor_memory_object_create(
	memory_object_size_t,
	memory_object_t *);

/* the object purger. purges the next eligible object from memory. */
/* returns TRUE if an object was purged, otherwise FALSE. */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);

struct trim_list {
	uint64_t	tl_offset;
	uint64_t	tl_length;
	struct trim_list *tl_next;
};

u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl);

#endif	/* _VM_VM_PROTOS_H_ */

#endif	/* XNU_KERNEL_PRIVATE */