/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef XNU_KERNEL_PRIVATE

#ifndef _VM_VM_PROTOS_H_
#define _VM_VM_PROTOS_H_

#include <mach/mach_types.h>
#include <kern/kern_types.h>

/*
 * This file contains various type definitions and routine prototypes
 * that are needed to avoid compilation warnings for VM code (in osfmk,
 * default_pager and bsd).
 * Most of these should eventually go into more appropriate header files.
 *
 * Include it after all other header files since it doesn't include any
 * type definitions and it works around some conflicts with other header
 * files.
 */

/*
 * iokit
 */
extern kern_return_t device_data_action(
	uintptr_t device_handle,
	ipc_port_t device_pager,
	vm_prot_t protection,
	vm_object_offset_t offset,
	vm_size_t size);

extern kern_return_t device_close(
	uintptr_t device_handle);

/*
 * default_pager
 */
extern int start_def_pager(
	char *bs_device);
extern int default_pager_init_flag;

/*
 * osfmk
 */
/* Only declare these when the IPC port header has not already done so. */
#ifndef _IPC_IPC_PORT_H_
extern mach_port_name_t ipc_port_copyout_send(
	ipc_port_t sright,
	ipc_space_t space);
extern task_t port_name_to_task(
	mach_port_name_t name);
#endif /* _IPC_IPC_PORT_H_ */

extern ipc_space_t get_task_ipcspace(
	task_t t);

/* Some loose-ends VM stuff */

extern vm_map_t kalloc_map;
extern vm_size_t msg_ool_size_small;
extern vm_map_t zone_map;

extern void consider_machine_adjust(void);
extern pmap_t get_map_pmap(vm_map_t);
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
extern int get_vmmap_entries(vm_map_t);

extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);

/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */

extern kern_return_t vm_upl_map
(
	vm_map_t target_task,
	upl_t upl,
	vm_address_t *address
);

extern kern_return_t vm_upl_unmap
(
	vm_map_t target_task,
	upl_t upl
);

extern kern_return_t vm_region_object_create
(
	vm_map_t target_task,
	vm_size_t size,
	ipc_port_t *object_handle
);

extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
extern mach_vm_offset_t mach_get_vm_end(vm_map_t);

#if CONFIG_CODE_DECRYPTION
/* Opaque to this header; defined by the code-decryption pager. */
struct pager_crypt_info;
extern kern_return_t vm_map_apple_protected(
	vm_map_t map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	struct pager_crypt_info *crypt_info);
extern void apple_protect_pager_bootstrap(void);
extern memory_object_t apple_protect_pager_setup(vm_object_t backing_object,
						 struct pager_crypt_info *crypt_info);
#endif	/* CONFIG_CODE_DECRYPTION */

struct vnode;
extern void swapfile_pager_bootstrap(void);
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);


/*
 * bsd
 */
/* NOTE(review): duplicate forward declaration (also declared above) — harmless in C. */
struct vnode;
extern void vnode_pager_shutdown(void);
extern void *upl_get_internal_page_list(
	upl_t upl);

/* Return convention shared by vnode_pagein()/vnode_pageout(). */
typedef int pager_return_t;
extern pager_return_t vnode_pagein(
	struct vnode *, upl_t,
	upl_offset_t, vm_object_offset_t,
	upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
	struct vnode *, upl_t,
	upl_offset_t, vm_object_offset_t,
	upl_size_t, int, int *);
extern uint32_t vnode_trim (struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
	struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
	struct vnode *);
extern uint32_t vnode_pager_isinuse(
	struct vnode *);
extern boolean_t vnode_pager_isSSD(
	struct vnode *);
extern void vnode_pager_throttle(
	void);
extern uint32_t vnode_pager_return_hard_throttle_limit(
	struct vnode *,
	uint32_t *,
	uint32_t);
extern kern_return_t vnode_pager_get_pathname(
	struct vnode *vp,
	char *pathname,
	vm_size_t *length_p);
extern kern_return_t vnode_pager_get_filename(
	struct vnode *vp,
	const char **filename);
extern kern_return_t vnode_pager_get_cs_blobs(
	struct vnode *vp,
	void **blobs);

#if CHECK_CS_VALIDATION_BITMAP
/* used by the vnode_pager_cs_validation_bitmap routine */
#define CS_BITMAP_SET	1
#define CS_BITMAP_CLEAR	2
#define CS_BITMAP_CHECK	3

#endif /* CHECK_CS_VALIDATION_BITMAP */

extern void vnode_pager_bootstrap(void) __attribute__((section("__TEXT, initcode")));
extern kern_return_t
vnode_pager_data_unlock(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_size_t size,
	vm_prot_t desired_access);
extern kern_return_t vnode_pager_init(
	memory_object_t,
	memory_object_control_t,
	memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
	memory_object_t,
	memory_object_offset_t *);
extern kern_return_t vnode_pager_get_isinuse(
	memory_object_t,
	uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
	memory_object_t,
	boolean_t *);
extern kern_return_t vnode_pager_check_hard_throttle(
	memory_object_t,
	uint32_t *,
	uint32_t);
extern kern_return_t vnode_pager_get_object_pathname(
	memory_object_t mem_obj,
	char *pathname,
	vm_size_t *length_p);
extern kern_return_t vnode_pager_get_object_filename(
	memory_object_t mem_obj,
	const char **filename);
extern kern_return_t vnode_pager_get_object_cs_blobs(
	memory_object_t mem_obj,
	void **blobs);

#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	int optype);
#endif /* CHECK_CS_VALIDATION_BITMAP */

extern kern_return_t ubc_cs_check_validation_bitmap (
	struct vnode *vp,
	memory_object_offset_t offset,
	int optype);

extern kern_return_t vnode_pager_data_request(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	vm_prot_t,
	memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	memory_object_offset_t *,
	int *,
	boolean_t,
	boolean_t,
	int);
extern kern_return_t vnode_pager_data_initialize(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t);
extern void vnode_pager_reference(
	memory_object_t mem_obj);
extern kern_return_t vnode_pager_synchronize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_size_t length,
	vm_sync_t sync_flags);
extern kern_return_t vnode_pager_map(
	memory_object_t mem_obj,
	vm_prot_t prot);
extern kern_return_t vnode_pager_last_unmap(
	memory_object_t mem_obj);
extern void vnode_pager_deallocate(
	memory_object_t);
extern kern_return_t vnode_pager_terminate(
	memory_object_t);
extern void vnode_pager_vrele(
	struct vnode *vp);
extern void vnode_pager_release_from_cache(
	int *);
extern int ubc_map(
	struct vnode *vp,
	int flags);
extern void ubc_unmap(
	struct vnode *vp);

struct vm_map_entry;
extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);

extern void dp_memory_object_reference(memory_object_t);
extern void dp_memory_object_deallocate(memory_object_t);
/* Skip these when the MIG-generated server header already declares them. */
#ifndef _memory_object_server_
extern kern_return_t dp_memory_object_init(memory_object_t,
					   memory_object_control_t,
					   memory_object_cluster_size_t);
extern kern_return_t dp_memory_object_terminate(memory_object_t);
extern kern_return_t dp_memory_object_data_request(memory_object_t,
						   memory_object_offset_t,
						   memory_object_cluster_size_t,
						   vm_prot_t,
						   memory_object_fault_info_t);
extern kern_return_t dp_memory_object_data_return(memory_object_t,
						  memory_object_offset_t,
						  memory_object_cluster_size_t,
						  memory_object_offset_t *,
						  int *,
						  boolean_t,
						  boolean_t,
						  int);
extern kern_return_t dp_memory_object_data_initialize(memory_object_t,
						      memory_object_offset_t,
						      memory_object_cluster_size_t);
extern kern_return_t dp_memory_object_data_unlock(memory_object_t,
						  memory_object_offset_t,
						  memory_object_size_t,
						  vm_prot_t);
extern kern_return_t dp_memory_object_synchronize(memory_object_t,
						  memory_object_offset_t,
						  memory_object_size_t,
						  vm_sync_t);
extern kern_return_t dp_memory_object_map(memory_object_t,
					  vm_prot_t);
extern kern_return_t dp_memory_object_last_unmap(memory_object_t);
#endif /* _memory_object_server_ */
#ifndef _memory_object_default_server_
extern kern_return_t default_pager_memory_object_create(
	memory_object_default_t,
	vm_size_t,
	memory_object_t *);
#endif /* _memory_object_default_server_ */

#if CONFIG_FREEZE
extern unsigned int default_pager_swap_pages_free(void);
/* Opaque handle types used by the default-freezer interface below. */
struct default_freezer_handle;
struct vm_page;
__private_extern__ void default_freezer_init(void);
__private_extern__ struct default_freezer_handle* default_freezer_handle_allocate(void);
__private_extern__ kern_return_t
default_freezer_handle_init(
	struct default_freezer_handle *df_handle);
__private_extern__ void
default_freezer_handle_deallocate(
	struct default_freezer_handle *df_handle);
__private_extern__ void
default_freezer_pageout(
	struct default_freezer_handle *df_handle);
__private_extern__ kern_return_t
default_freezer_pack(
	unsigned int *purgeable_count,
	unsigned int *wired_count,
	unsigned int *clean_count,
	unsigned int *dirty_count,
	unsigned int dirty_budget,
	boolean_t *shared,
	vm_object_t src_object,
	struct default_freezer_handle *df_handle);
__private_extern__ void
default_freezer_unpack(
	struct default_freezer_handle *df_handle);
__private_extern__ void
default_freezer_pack_page(
	struct vm_page* p,
	struct default_freezer_handle *df_handle);

#endif /* CONFIG_FREEZE */

extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
				       memory_object_control_t,
				       memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
					       memory_object_offset_t,
					       memory_object_cluster_size_t,
					       vm_prot_t,
					       memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
					      memory_object_offset_t,
					      memory_object_cluster_size_t,
					      memory_object_offset_t *,
					      int *,
					      boolean_t,
					      boolean_t,
					      int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
						  memory_object_offset_t,
						  memory_object_cluster_size_t);
extern kern_return_t device_pager_data_unlock(memory_object_t,
					      memory_object_offset_t,
					      memory_object_size_t,
					      vm_prot_t);
extern kern_return_t device_pager_synchronize(memory_object_t,
					      memory_object_offset_t,
					      memory_object_size_t,
					      vm_sync_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
	memory_object_t device,
	memory_object_offset_t offset,
	ppnum_t page_num,
	vm_size_t size);
extern memory_object_t device_pager_setup(
	memory_object_t,
	uintptr_t,
	vm_size_t,
	int);
extern void device_pager_bootstrap(void) __attribute__((section("__TEXT, initcode")));

extern kern_return_t memory_object_create_named(
	memory_object_t pager,
	memory_object_offset_t size,
	memory_object_control_t *control);

struct macx_triggers_args;
extern int mach_macx_triggers(
	struct macx_triggers_args *args);

extern int macx_swapinfo(
	memory_object_size_t *total_p,
	memory_object_size_t *avail_p,
	vm_size_t *pagesize_p,
	boolean_t *encrypted_p);

extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(vm_map_t, vm_map_offset_t, vm_map_offset_t);

/* code-signing validation hooks */
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr);
extern boolean_t cs_validate_page(void *blobs,
				  memory_object_t pager,
				  memory_object_offset_t offset,
				  const void *data,
				  boolean_t *tainted);

extern kern_return_t mach_memory_entry_purgable_control(
	ipc_port_t entry_port,
	vm_purgable_t control,
	int *state);

extern kern_return_t mach_memory_entry_page_op(
	ipc_port_t entry_port,
	vm_object_offset_t offset,
	int ops,
	ppnum_t *phys_entry,
	int *flags);

extern kern_return_t mach_memory_entry_range_op(
	ipc_port_t entry_port,
	vm_object_offset_t offset_beg,
	vm_object_offset_t offset_end,
	int ops,
	int *range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern void mach_destroy_memory_entry(ipc_port_t port);
extern kern_return_t mach_memory_entry_allocate(
	struct vm_named_entry **user_entry_p,
	ipc_port_t *user_handle_p);

extern void vm_paging_map_init(void);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern void no_paging_space_action(void);

/* operation codes for vm_toggle_entry_reuse() */
#define VM_TOGGLE_CLEAR		0
#define VM_TOGGLE_SET		1
#define VM_TOGGLE_GETVALUE	999
int vm_toggle_entry_reuse(int, int*);
#endif /* _VM_VM_PROTOS_H_ */

#endif /* XNU_KERNEL_PRIVATE */