/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	memory_object.h
 *	Author:	Michael Wayne Young
 *
 *	External memory management interface definition.
 */

#ifndef	_MACH_MEMORY_OBJECT_TYPES_H_
#define _MACH_MEMORY_OBJECT_TYPES_H_

/*
 *	User-visible types used in the external memory
 *	management interface:
 */

#include <mach/port.h>
#include <mach/message.h>
#include <mach/vm_prot.h>
#include <mach/vm_sync.h>
#include <mach/vm_types.h>
#include <mach/machine/vm_types.h>

#include <sys/cdefs.h>

#define VM_64_BIT_DATA_OBJECTS

/* Offsets/sizes into a memory object are always 64-bit, even on ILP32. */
typedef unsigned long long	memory_object_offset_t;
typedef unsigned long long	memory_object_size_t;
typedef natural_t		memory_object_cluster_size_t;
typedef natural_t *		memory_object_fault_info_t;


/*
 * Temporary until real EMMI version gets re-implemented
 */

#ifdef	KERNEL_PRIVATE

/* Forward declaration; the full entry-point table is defined below. */
struct memory_object_pager_ops;

/*
 * In-kernel representation of a memory object: the first (and only
 * visible) field is a pointer to the pager's operations vector.
 */
typedef struct 		memory_object {
	const struct memory_object_pager_ops	*mo_pager_ops;
} *memory_object_t;

/*
 * Control handle the kernel hands to a pager so the pager can refer
 * back to the underlying VM object.
 */
typedef struct		memory_object_control {
	struct vm_object	*moc_object;
	unsigned int		moc_ikot;	/* XXX fake ip_kotype */
} *memory_object_control_t;

/*
 * Table of operations every pager implements; the kernel dispatches
 * EMMI calls on a memory object through this vector.
 */
typedef const struct memory_object_pager_ops {
	void (*memory_object_reference)(
		memory_object_t mem_obj);
	void (*memory_object_deallocate)(
		memory_object_t mem_obj);
	kern_return_t (*memory_object_init)(
		memory_object_t mem_obj,
		memory_object_control_t mem_control,
		memory_object_cluster_size_t size);
	kern_return_t (*memory_object_terminate)(
		memory_object_t mem_obj);
	kern_return_t (*memory_object_data_request)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t length,
		vm_prot_t desired_access,
		memory_object_fault_info_t fault_info);
	kern_return_t (*memory_object_data_return)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		vm_size_t size,
		memory_object_offset_t *resid_offset,
		int *io_error,
		boolean_t dirty,
		boolean_t kernel_copy,
		int upl_flags);
	kern_return_t (*memory_object_data_initialize)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		vm_size_t size);
	kern_return_t (*memory_object_data_unlock)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		vm_size_t size,
		vm_prot_t desired_access);
	kern_return_t (*memory_object_synchronize)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		vm_size_t size,
		vm_sync_t sync_flags);
	kern_return_t (*memory_object_map)(
		memory_object_t mem_obj,
		vm_prot_t prot);
	kern_return_t (*memory_object_last_unmap)(
		memory_object_t mem_obj);
	const char *memory_object_pager_name;
} * memory_object_pager_ops_t;

#else	/* KERNEL_PRIVATE */

/* Outside the kernel, memory objects are simply Mach ports. */
typedef mach_port_t	memory_object_t;
typedef mach_port_t	memory_object_control_t;

#endif	/* KERNEL_PRIVATE */

typedef memory_object_t *memory_object_array_t;
					/* A memory object ... */
					/*  Used by the kernel to retrieve */
					/*  or store data */

typedef	mach_port_t	memory_object_name_t;
					/* Used to describe the memory ... */
					/*  object in vm_regions() calls */

typedef mach_port_t	memory_object_default_t;
					/* Registered with the host ... */
					/*  for creating new internal objects */

#define MEMORY_OBJECT_NULL		((memory_object_t) 0)
#define MEMORY_OBJECT_CONTROL_NULL	((memory_object_control_t) 0)
#define MEMORY_OBJECT_NAME_NULL		((memory_object_name_t) 0)
#define MEMORY_OBJECT_DEFAULT_NULL	((memory_object_default_t) 0)


typedef	int		memory_object_copy_strategy_t;
					/* How memory manager handles copy: */
#define		MEMORY_OBJECT_COPY_NONE		0
					/* ... No special support */
#define		MEMORY_OBJECT_COPY_CALL		1
					/* ... Make call on memory manager */
#define		MEMORY_OBJECT_COPY_DELAY 	2
					/* ... Memory manager doesn't
					 *     change data externally.
					 */
#define		MEMORY_OBJECT_COPY_TEMPORARY	3
					/* ... Memory manager doesn't
					 *     change data externally, and
					 *     doesn't need to see changes.
					 */
#define		MEMORY_OBJECT_COPY_SYMMETRIC	4
					/* ... Memory manager doesn't
					 *     change data externally,
					 *     doesn't need to see changes,
					 *     and object will not be
					 *     multiply mapped.
					 *
					 *     XXX
					 *     Not yet safe for non-kernel use.
					 */

#define		MEMORY_OBJECT_COPY_INVALID	5
					/* ...	An invalid copy strategy,
					 *	for external objects which
					 *	have not been initialized.
					 *	Allows copy_strategy to be
					 *	examined without also
					 *	examining pager_ready and
					 *	internal.
					 */

typedef	int		memory_object_return_t;
					/* Which pages to return to manager
					   this time (lock_request) */
#define		MEMORY_OBJECT_RETURN_NONE	0
					/* ... don't return any. */
#define		MEMORY_OBJECT_RETURN_DIRTY	1
					/* ... only dirty pages. */
#define		MEMORY_OBJECT_RETURN_ALL	2
					/* ... dirty and precious pages. */
#define		MEMORY_OBJECT_RETURN_ANYTHING	3
					/* ... any resident page. */

/*
 *	Data lock request flags
 */

#define		MEMORY_OBJECT_DATA_FLUSH 	0x1
#define		MEMORY_OBJECT_DATA_NO_CHANGE	0x2
#define		MEMORY_OBJECT_DATA_PURGE	0x4
#define		MEMORY_OBJECT_COPY_SYNC		0x8
#define		MEMORY_OBJECT_DATA_SYNC		0x10
#define         MEMORY_OBJECT_IO_SYNC           0x20

/*
 *	Types for the memory object flavor interfaces
 */

#define MEMORY_OBJECT_INFO_MAX      (1024)
typedef int     *memory_object_info_t;
typedef int	 memory_object_flavor_t;
typedef int      memory_object_info_data_t[MEMORY_OBJECT_INFO_MAX];


#define MEMORY_OBJECT_PERFORMANCE_INFO	11
#define MEMORY_OBJECT_ATTRIBUTE_INFO	14
#define MEMORY_OBJECT_BEHAVIOR_INFO 	15

#ifdef	PRIVATE

/* Deprecated flavor numbers, kept for old clients. */
#define OLD_MEMORY_OBJECT_BEHAVIOR_INFO 	10
#define OLD_MEMORY_OBJECT_ATTRIBUTE_INFO	12

struct old_memory_object_behave_info {
	memory_object_copy_strategy_t	copy_strategy;
	boolean_t			temporary;
	boolean_t			invalidate;
};

struct old_memory_object_attr_info {			/* old attr list */
        boolean_t       		object_ready;
        boolean_t       		may_cache;
        memory_object_copy_strategy_t 	copy_strategy;
};

typedef struct old_memory_object_behave_info *old_memory_object_behave_info_t;
typedef struct old_memory_object_behave_info old_memory_object_behave_info_data_t;
typedef struct old_memory_object_attr_info *old_memory_object_attr_info_t;
typedef struct old_memory_object_attr_info old_memory_object_attr_info_data_t;

/* Counts are expressed in 32-bit (int-sized) words, per MIG convention. */
#define OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT   	((mach_msg_type_number_t) \
		(sizeof(old_memory_object_behave_info_data_t)/sizeof(int)))
#define OLD_MEMORY_OBJECT_ATTR_INFO_COUNT	((mach_msg_type_number_t) \
		(sizeof(old_memory_object_attr_info_data_t)/sizeof(int)))

#ifdef KERNEL

__BEGIN_DECLS
extern void memory_object_reference(memory_object_t object);
extern void memory_object_deallocate(memory_object_t object);

extern void memory_object_default_reference(memory_object_default_t);
extern void memory_object_default_deallocate(memory_object_default_t);

extern void memory_object_control_reference(memory_object_control_t control);
extern void memory_object_control_deallocate(memory_object_control_t control);
extern int  memory_object_control_uiomove(memory_object_control_t, memory_object_offset_t, void *, int, int, int, int);
__END_DECLS

#endif  /* KERNEL */

#endif	/* PRIVATE */

struct memory_object_perf_info {
	memory_object_cluster_size_t	cluster_size;
	boolean_t			may_cache;
};

struct memory_object_attr_info {
	memory_object_copy_strategy_t	copy_strategy;
	memory_object_cluster_size_t	cluster_size;
	boolean_t			may_cache_object;
	boolean_t			temporary;
};

struct memory_object_behave_info {
	memory_object_copy_strategy_t	copy_strategy;
	boolean_t			temporary;
	boolean_t			invalidate;
	boolean_t			silent_overwrite;
	boolean_t			advisory_pageout;
};


typedef struct memory_object_behave_info *memory_object_behave_info_t;
typedef struct memory_object_behave_info memory_object_behave_info_data_t;

typedef struct memory_object_perf_info 	*memory_object_perf_info_t;
typedef struct memory_object_perf_info	memory_object_perf_info_data_t;

typedef struct memory_object_attr_info	*memory_object_attr_info_t;
typedef struct memory_object_attr_info	memory_object_attr_info_data_t;

/* Counts are expressed in 32-bit (int-sized) words, per MIG convention. */
#define MEMORY_OBJECT_BEHAVE_INFO_COUNT	((mach_msg_type_number_t)	\
		(sizeof(memory_object_behave_info_data_t)/sizeof(int)))
#define MEMORY_OBJECT_PERF_INFO_COUNT	((mach_msg_type_number_t)	\
		(sizeof(memory_object_perf_info_data_t)/sizeof(int)))
#define MEMORY_OBJECT_ATTR_INFO_COUNT	((mach_msg_type_number_t)	\
		(sizeof(memory_object_attr_info_data_t)/sizeof(int)))

/* True when f is not one of the known flavor constants above. */
#define invalid_memory_object_flavor(f)					\
	(f != MEMORY_OBJECT_ATTRIBUTE_INFO && 				\
	 f != MEMORY_OBJECT_PERFORMANCE_INFO && 			\
	 f != OLD_MEMORY_OBJECT_BEHAVIOR_INFO &&			\
	 f != MEMORY_OBJECT_BEHAVIOR_INFO &&				\
	 f != OLD_MEMORY_OBJECT_ATTRIBUTE_INFO)


/*
 * Used to support options on memory_object_release_name call
 */
#define MEMORY_OBJECT_TERMINATE_IDLE	0x1
#define MEMORY_OBJECT_RESPECT_CACHE	0x2
#define MEMORY_OBJECT_RELEASE_NO_OP	0x4


/* named entry processor mapping options */
/* enumerated */
#define MAP_MEM_NOOP		0
#define MAP_MEM_COPYBACK	1
#define MAP_MEM_IO		2
#define MAP_MEM_WTHRU		3
#define MAP_MEM_WCOMB		4	/* Write combining mode */
					/* aka store gather     */

/* The caching mode lives in the top 8 bits of the flags word. */
#define GET_MAP_MEM(flags)	\
	((((unsigned int)(flags)) >> 24) & 0xFF)

#define SET_MAP_MEM(caching, flags)	\
	((flags) = ((((unsigned int)(caching)) << 24) \
			& 0xFF000000) | ((flags) & 0xFFFFFF));

/* leave room for vm_prot bits */
#define MAP_MEM_ONLY		0x10000	/* change processor caching  */
#define MAP_MEM_NAMED_CREATE	0x20000 /* create extant object      */
#define MAP_MEM_PURGABLE	0x40000	/* create a purgable VM object */
#define MAP_MEM_NAMED_REUSE	0x80000	/* reuse provided entry if identical */

#ifdef KERNEL

/*
 *  Universal Page List data structures
 *
 *  A UPL describes a bounded set of physical pages
 *  associated with some range of an object or map
 *  and a snapshot of the attributes associated with
 *  each of those pages.
 */
#ifdef PRIVATE
#define MAX_UPL_TRANSFER 256
#define MAX_UPL_SIZE	 4096

struct upl_page_info {
	ppnum_t		phys_addr;	/* physical page index number */
        unsigned int
#ifdef  XNU_KERNEL_PRIVATE
			pageout:1,	/* page is to be removed on commit */
			absent:1,	/* No valid data in this page */
			dirty:1,	/* Page must be cleaned (O) */
			precious:1,	/* must be cleaned, we have only copy */
			device:1,	/* no page data, mapped dev memory */
			speculative:1,  /* page is valid, but not yet accessed */
			cs_validated:1,	/* CODE SIGNING: page was validated */
			cs_tainted:1,	/* CODE SIGNING: page is tainted */
			:0;		/* force to long boundary */
#else
			opaque;		/* use upl_page_xxx() accessor funcs */
#endif /* XNU_KERNEL_PRIVATE */
};

#else

struct upl_page_info {
	unsigned int	opaque[2];	/* use upl_page_xxx() accessor funcs */
};

#endif /* PRIVATE */

typedef struct upl_page_info	upl_page_info_t;
typedef upl_page_info_t		*upl_page_info_array_t;
typedef upl_page_info_array_t	upl_page_list_ptr_t;

typedef uint32_t	upl_offset_t;	/* page-aligned byte offset */
typedef uint32_t	upl_size_t;	/* page-aligned byte size */

/* upl invocation flags */
/* top nibble is used by super upl */

#define UPL_FLAGS_NONE		0x00000000
#define UPL_COPYOUT_FROM	0x00000001
#define UPL_PRECIOUS		0x00000002
#define UPL_NO_SYNC		0x00000004
#define UPL_CLEAN_IN_PLACE	0x00000008
#define UPL_NOBLOCK		0x00000010
#define UPL_RET_ONLY_DIRTY	0x00000020
#define UPL_SET_INTERNAL	0x00000040
#define UPL_QUERY_OBJECT_TYPE	0x00000080
#define UPL_RET_ONLY_ABSENT	0x00000100	/* used only for COPY_FROM = FALSE */
#define UPL_FILE_IO		0x00000200
#define UPL_SET_LITE		0x00000400
#define UPL_SET_INTERRUPTIBLE	0x00000800
#define UPL_SET_IO_WIRE		0x00001000
#define UPL_FOR_PAGEOUT		0x00002000
#define UPL_WILL_BE_DUMPED	0x00004000
#define UPL_FORCE_DATA_SYNC	0x00008000
/* continued after the ticket bits... */

#define UPL_PAGE_TICKET_MASK	0x000F0000
#define UPL_PAGE_TICKET_SHIFT	16

/* ... flags resume here */
#define UPL_BLOCK_ACCESS	0x00100000
#define UPL_ENCRYPT		0x00200000
#define UPL_NOZEROFILL		0x00400000
#define UPL_WILL_MODIFY		0x00800000	/* caller will modify the pages */

#define UPL_NEED_32BIT_ADDR	0x01000000

/* UPL flags known by this kernel */
#define UPL_VALID_FLAGS		0x01FFFFFF


/* upl abort error flags */
#define UPL_ABORT_RESTART		0x1
#define UPL_ABORT_UNAVAILABLE	0x2
#define UPL_ABORT_ERROR		0x4
#define UPL_ABORT_FREE_ON_EMPTY	0x8  /* only implemented in wrappers */
#define UPL_ABORT_DUMP_PAGES	0x10
#define UPL_ABORT_NOTIFY_EMPTY	0x20
#define UPL_ABORT_ALLOW_ACCESS	0x40
#define UPL_ABORT_REFERENCE	0x80

/* upl pages check flags */
#define UPL_CHECK_DIRTY         0x1


/*
 *  upl pagein/pageout  flags
 *
 *
 * when I/O is issued from this UPL it should be done synchronously
 */
#define UPL_IOSYNC	0x1

/*
 * the passed in UPL should not have either a commit or abort
 * applied to it by the underlying layers... the site that
 * created the UPL is responsible for cleaning it up.
 */
#define UPL_NOCOMMIT	0x2

/*
 * turn off any speculative read-ahead applied at the I/O layer
 */
#define UPL_NORDAHEAD	0x4

/*
 * pageout request is targeting a real file
 * as opposed to a swap file.
 */

#define UPL_VNODE_PAGER	0x8
/*
 * this pageout is being originated as part of an explicit
 * memory synchronization operation... no speculative clustering
 * should be applied, only the range specified should be pushed.
 */
#define UPL_MSYNC	0x10

/*
 *
 */
#ifdef MACH_KERNEL_PRIVATE
#define UPL_PAGING_ENCRYPTED	0x20
#endif	/* MACH_KERNEL_PRIVATE */

/*
 * this pageout is being originated as part of an explicit
 * memory synchronization operation that is checking for I/O
 * errors and taking its own action... if an error occurs,
 * just abort the pages back into the cache unchanged
 */
#define UPL_KEEPCACHED	0x40

/*
 * this pageout originated from within cluster_io to deal
 * with a dirty page that hasn't yet been seen by the FS
 * that backs it... tag it so that the FS can take the
 * appropriate action w/r to its locking model since the
 * pageout will reenter the FS for the same file currently
 * being handled in this context.
 */

#define UPL_NESTED_PAGEOUT	0x80



/* upl commit flags */
#define UPL_COMMIT_FREE_ON_EMPTY	0x1 /* only implemented in wrappers */
#define UPL_COMMIT_CLEAR_DIRTY		0x2
#define UPL_COMMIT_SET_DIRTY		0x4
#define UPL_COMMIT_INACTIVATE		0x8
#define UPL_COMMIT_NOTIFY_EMPTY		0x10
#define UPL_COMMIT_ALLOW_ACCESS		0x20
#define UPL_COMMIT_CS_VALIDATED		0x40

#define UPL_COMMIT_KERNEL_ONLY_FLAGS	(UPL_COMMIT_CS_VALIDATED)

/* flags for return of state from vm_map_get_upl,  vm_upl address space */
/* based call */
#define UPL_DEV_MEMORY		0x1
#define UPL_PHYS_CONTIG		0x2


/*
 * Flags for the UPL page ops routine.  This routine is not exported
 * out of the kernel at the moment and so the defs live here.
 */
#define UPL_POP_DIRTY		0x1
#define UPL_POP_PAGEOUT		0x2
#define UPL_POP_PRECIOUS		0x4
#define UPL_POP_ABSENT		0x8
#define UPL_POP_BUSY			0x10

#define UPL_POP_PHYSICAL	0x10000000
#define UPL_POP_DUMP		0x20000000
#define UPL_POP_SET		0x40000000
#define UPL_POP_CLR		0x80000000

/*
 * Flags for the UPL range op routine.  This routine is not exported
 * out of the kernel at the moment and so the defs live here.
 */
/*
 * UPL_ROP_ABSENT: Returns the extent of the range presented which
 * is absent, starting with the start address presented
 */
#define UPL_ROP_ABSENT		0x01
/*
 * UPL_ROP_PRESENT: Returns the extent of the range presented which
 * is present (i.e. resident), starting with the start address presented
 */
#define UPL_ROP_PRESENT		0x02
/*
 * UPL_ROP_DUMP: Dump the pages which are found in the target object
 * for the target range.
 */
#define UPL_ROP_DUMP			0x04

#ifdef	PRIVATE

/* access macros for upl_t */
/* NOTE: a phys_addr of 0 marks an unused slot; predicates below
 * return FALSE for such slots rather than reading the bitfields. */

#define UPL_DEVICE_PAGE(upl) \
	(((upl)[0].phys_addr != 0) ? ((upl)[0].device) : FALSE)

#define UPL_PAGE_PRESENT(upl, index)  \
	((upl)[(index)].phys_addr != 0)

#define UPL_PHYS_PAGE(upl, index) \
	((upl)[(index)].phys_addr)

#define UPL_SPECULATIVE_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].speculative) : FALSE)

#define UPL_DIRTY_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].dirty) : FALSE)

#define UPL_PRECIOUS_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].precious) : FALSE)

#define UPL_VALID_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? (!((upl)[(index)].absent)) : FALSE)

#define UPL_PAGEOUT_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].pageout) : FALSE)

#define UPL_SET_PAGE_FREE_ON_COMMIT(upl, index) \
	(((upl)[(index)].phys_addr != 0) ?       \
	 ((upl)[(index)].pageout = TRUE) : FALSE)

#define UPL_CLR_PAGE_FREE_ON_COMMIT(upl, index) \
	(((upl)[(index)].phys_addr != 0) ?       \
	 ((upl)[(index)].pageout = FALSE) : FALSE)

/* modifier macros for upl_t */

#define UPL_SET_CS_VALIDATED(upl, index, value) \
	((upl)[(index)].cs_validated = ((value) ? TRUE : FALSE))

#define UPL_SET_CS_TAINTED(upl, index, value) \
	((upl)[(index)].cs_tainted = ((value) ? TRUE : FALSE))

/* The call prototyped below is used strictly by UPL_GET_INTERNAL_PAGE_LIST */

extern vm_size_t	upl_offset_to_pagelist;
extern vm_size_t 	upl_get_internal_pagelist_offset(void);

/* UPL_GET_INTERNAL_PAGE_LIST is only valid on internal objects where the */
/* list request was made with the UPL_INTERNAL flag */

/* The pagelist offset is computed once and cached in
 * upl_offset_to_pagelist on first use (0 means "not yet computed"). */
#define UPL_GET_INTERNAL_PAGE_LIST(upl) \
	((upl_page_info_t *)((upl_offset_to_pagelist == 0) ?  \
	(unsigned int)upl + (unsigned int)(upl_offset_to_pagelist = upl_get_internal_pagelist_offset()): \
	(unsigned int)upl + (unsigned int)upl_offset_to_pagelist))

__BEGIN_DECLS

extern ppnum_t	upl_phys_page(upl_page_info_t *upl, int index);
extern boolean_t	upl_device_page(upl_page_info_t *upl);
extern boolean_t	upl_speculative_page(upl_page_info_t *upl, int index);
extern void	upl_clear_dirty(upl_t upl, boolean_t value);

__END_DECLS

#endif /* PRIVATE */

__BEGIN_DECLS

extern boolean_t	upl_page_present(upl_page_info_t *upl, int index);
extern boolean_t	upl_dirty_page(upl_page_info_t *upl, int index);
extern boolean_t	upl_valid_page(upl_page_info_t *upl, int index);
extern void		upl_deallocate(upl_t upl);

__END_DECLS

#endif  /* KERNEL */

#endif	/* _MACH_MEMORY_OBJECT_TYPES_H_ */