1/* 2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28/* 29 * @OSF_COPYRIGHT@ 30 */ 31/* 32 * Mach Operating System 33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University 34 * All Rights Reserved. 35 * 36 * Permission to use, copy, modify and distribute this software and its 37 * documentation is hereby granted, provided that both the copyright 38 * notice and this permission notice appear in all copies of the 39 * software, derivative works or modified versions, and any portions 40 * thereof, and that both notices appear in supporting documentation. 41 * 42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 43 * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	memory_object.h
 *	Author:	Michael Wayne Young
 *
 *	External memory management interface definition.
 */

#ifndef	_MACH_MEMORY_OBJECT_TYPES_H_
#define	_MACH_MEMORY_OBJECT_TYPES_H_

/*
 *	User-visible types used in the external memory
 *	management interface:
 */

#include <mach/port.h>
#include <mach/message.h>
#include <mach/vm_prot.h>
#include <mach/vm_sync.h>
#include <mach/vm_types.h>
#include <mach/machine/vm_types.h>

#include <sys/cdefs.h>

#define VM_64_BIT_DATA_OBJECTS

/*
 * Offsets and sizes in the memory object interface are 64-bit
 * (unsigned long long) regardless of the machine word size;
 * cluster sizes remain natural_t.
 */
typedef unsigned long long	memory_object_offset_t;
typedef unsigned long long	memory_object_size_t;
typedef natural_t		memory_object_cluster_size_t;
typedef natural_t *		memory_object_fault_info_t;

typedef unsigned long long	vm_object_id_t;


/*
 * Temporary until real EMMI version gets re-implemented
 */

#ifdef	KERNEL_PRIVATE

struct memory_object_pager_ops;	/* forward declaration */

/*
 * In-kernel representation of a memory object.  The leading pad
 * field(s) stand in for a struct ipc_object_header (plus alignment
 * padding on LP64), followed by the pager's operations vtable.
 */
typedef struct memory_object {
	unsigned int	_pad1;	/* struct ipc_object_header */
#ifdef __LP64__
	unsigned int	_pad2;	/* pad to natural boundary */
#endif
	const struct memory_object_pager_ops	*mo_pager_ops;
} *memory_object_t;

/*
 * In-kernel control handle pairing a memory object with the
 * VM object that caches its pages.  Same ipc_object_header-shaped
 * prefix as struct memory_object above.
 */
typedef struct memory_object_control {
	unsigned int	moc_ikot;	/* struct ipc_object_header */
#ifdef __LP64__
	unsigned int	_pad;		/* pad to natural boundary */
#endif
	struct vm_object	*moc_object;
} *memory_object_control_t;

/*
 * Operations vtable implemented by each pager type and invoked by
 * the VM layer; identified for debugging via memory_object_pager_name.
 * Outside KERNEL_PRIVATE, memory object handles are plain Mach ports.
 */
typedef const struct memory_object_pager_ops {
	/* take / release a reference on the pager */
	void (*memory_object_reference)(
		memory_object_t mem_obj);
	void (*memory_object_deallocate)(
		memory_object_t mem_obj);
	/* attach the pager to a control handle */
	kern_return_t (*memory_object_init)(
		memory_object_t mem_obj,
		memory_object_control_t mem_control,
		memory_object_cluster_size_t size);
	kern_return_t (*memory_object_terminate)(
		memory_object_t mem_obj);
	/* fault path: supply data for [offset, offset+length) */
	kern_return_t (*memory_object_data_request)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t length,
		vm_prot_t desired_access,
		memory_object_fault_info_t fault_info);
	/* pageout path: write back dirty/precious data */
	kern_return_t (*memory_object_data_return)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t size,
		memory_object_offset_t *resid_offset,
		int *io_error,
		boolean_t dirty,
		boolean_t kernel_copy,
		int upl_flags);
	kern_return_t (*memory_object_data_initialize)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t size);
	kern_return_t (*memory_object_data_unlock)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_size_t size,
		vm_prot_t desired_access);
	kern_return_t (*memory_object_synchronize)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_size_t size,
		vm_sync_t sync_flags);
	/* map / last-unmap notifications for the backing object */
	kern_return_t (*memory_object_map)(
		memory_object_t mem_obj,
		vm_prot_t prot);
	kern_return_t (*memory_object_last_unmap)(
		memory_object_t mem_obj);
	kern_return_t (*memory_object_data_reclaim)(
		memory_object_t mem_obj,
		boolean_t reclaim_backing_store);
	const char *memory_object_pager_name;
} * memory_object_pager_ops_t;

#else	/* KERNEL_PRIVATE */

typedef mach_port_t	memory_object_t;
typedef mach_port_t	memory_object_control_t;

#endif	/* KERNEL_PRIVATE */

typedef memory_object_t	*memory_object_array_t;

/* A memory object ... */ 176 /* Used by the kernel to retrieve */ 177 /* or store data */ 178 179typedef mach_port_t memory_object_name_t; 180 /* Used to describe the memory ... */ 181 /* object in vm_regions() calls */ 182 183typedef mach_port_t memory_object_default_t; 184 /* Registered with the host ... */ 185 /* for creating new internal objects */ 186 187#define MEMORY_OBJECT_NULL ((memory_object_t) 0) 188#define MEMORY_OBJECT_CONTROL_NULL ((memory_object_control_t) 0) 189#define MEMORY_OBJECT_NAME_NULL ((memory_object_name_t) 0) 190#define MEMORY_OBJECT_DEFAULT_NULL ((memory_object_default_t) 0) 191 192 193typedef int memory_object_copy_strategy_t; 194 /* How memory manager handles copy: */ 195#define MEMORY_OBJECT_COPY_NONE 0 196 /* ... No special support */ 197#define MEMORY_OBJECT_COPY_CALL 1 198 /* ... Make call on memory manager */ 199#define MEMORY_OBJECT_COPY_DELAY 2 200 /* ... Memory manager doesn't 201 * change data externally. 202 */ 203#define MEMORY_OBJECT_COPY_TEMPORARY 3 204 /* ... Memory manager doesn't 205 * change data externally, and 206 * doesn't need to see changes. 207 */ 208#define MEMORY_OBJECT_COPY_SYMMETRIC 4 209 /* ... Memory manager doesn't 210 * change data externally, 211 * doesn't need to see changes, 212 * and object will not be 213 * multiply mapped. 214 * 215 * XXX 216 * Not yet safe for non-kernel use. 217 */ 218 219#define MEMORY_OBJECT_COPY_INVALID 5 220 /* ... An invalid copy strategy, 221 * for external objects which 222 * have not been initialized. 223 * Allows copy_strategy to be 224 * examined without also 225 * examining pager_ready and 226 * internal. 227 */ 228 229typedef int memory_object_return_t; 230 /* Which pages to return to manager 231 this time (lock_request) */ 232#define MEMORY_OBJECT_RETURN_NONE 0 233 /* ... don't return any. */ 234#define MEMORY_OBJECT_RETURN_DIRTY 1 235 /* ... only dirty pages. */ 236#define MEMORY_OBJECT_RETURN_ALL 2 237 /* ... dirty and precious pages. 
*/ 238#define MEMORY_OBJECT_RETURN_ANYTHING 3 239 /* ... any resident page. */ 240 241/* 242 * Data lock request flags 243 */ 244 245#define MEMORY_OBJECT_DATA_FLUSH 0x1 246#define MEMORY_OBJECT_DATA_NO_CHANGE 0x2 247#define MEMORY_OBJECT_DATA_PURGE 0x4 248#define MEMORY_OBJECT_COPY_SYNC 0x8 249#define MEMORY_OBJECT_DATA_SYNC 0x10 250#define MEMORY_OBJECT_IO_SYNC 0x20 251#define MEMORY_OBJECT_DATA_FLUSH_ALL 0x40 252 253/* 254 * Types for the memory object flavor interfaces 255 */ 256 257#define MEMORY_OBJECT_INFO_MAX (1024) 258typedef int *memory_object_info_t; 259typedef int memory_object_flavor_t; 260typedef int memory_object_info_data_t[MEMORY_OBJECT_INFO_MAX]; 261 262 263#define MEMORY_OBJECT_PERFORMANCE_INFO 11 264#define MEMORY_OBJECT_ATTRIBUTE_INFO 14 265#define MEMORY_OBJECT_BEHAVIOR_INFO 15 266 267#ifdef PRIVATE 268 269#define OLD_MEMORY_OBJECT_BEHAVIOR_INFO 10 270#define OLD_MEMORY_OBJECT_ATTRIBUTE_INFO 12 271 272struct old_memory_object_behave_info { 273 memory_object_copy_strategy_t copy_strategy; 274 boolean_t temporary; 275 boolean_t invalidate; 276}; 277 278struct old_memory_object_attr_info { /* old attr list */ 279 boolean_t object_ready; 280 boolean_t may_cache; 281 memory_object_copy_strategy_t copy_strategy; 282}; 283 284typedef struct old_memory_object_behave_info *old_memory_object_behave_info_t; 285typedef struct old_memory_object_behave_info old_memory_object_behave_info_data_t; 286typedef struct old_memory_object_attr_info *old_memory_object_attr_info_t; 287typedef struct old_memory_object_attr_info old_memory_object_attr_info_data_t; 288 289#define OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \ 290 (sizeof(old_memory_object_behave_info_data_t)/sizeof(int))) 291#define OLD_MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \ 292 (sizeof(old_memory_object_attr_info_data_t)/sizeof(int))) 293 294#ifdef KERNEL 295 296__BEGIN_DECLS 297extern void memory_object_reference(memory_object_t object); 298extern void 
memory_object_deallocate(memory_object_t object); 299 300extern void memory_object_default_reference(memory_object_default_t); 301extern void memory_object_default_deallocate(memory_object_default_t); 302 303extern void memory_object_control_reference(memory_object_control_t control); 304extern void memory_object_control_deallocate(memory_object_control_t control); 305extern int memory_object_control_uiomove(memory_object_control_t, memory_object_offset_t, void *, int, int, int, int); 306__END_DECLS 307 308#endif /* KERNEL */ 309 310#endif /* PRIVATE */ 311 312struct memory_object_perf_info { 313 memory_object_cluster_size_t cluster_size; 314 boolean_t may_cache; 315}; 316 317struct memory_object_attr_info { 318 memory_object_copy_strategy_t copy_strategy; 319 memory_object_cluster_size_t cluster_size; 320 boolean_t may_cache_object; 321 boolean_t temporary; 322}; 323 324struct memory_object_behave_info { 325 memory_object_copy_strategy_t copy_strategy; 326 boolean_t temporary; 327 boolean_t invalidate; 328 boolean_t silent_overwrite; 329 boolean_t advisory_pageout; 330}; 331 332 333typedef struct memory_object_behave_info *memory_object_behave_info_t; 334typedef struct memory_object_behave_info memory_object_behave_info_data_t; 335 336typedef struct memory_object_perf_info *memory_object_perf_info_t; 337typedef struct memory_object_perf_info memory_object_perf_info_data_t; 338 339typedef struct memory_object_attr_info *memory_object_attr_info_t; 340typedef struct memory_object_attr_info memory_object_attr_info_data_t; 341 342#define MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \ 343 (sizeof(memory_object_behave_info_data_t)/sizeof(int))) 344#define MEMORY_OBJECT_PERF_INFO_COUNT ((mach_msg_type_number_t) \ 345 (sizeof(memory_object_perf_info_data_t)/sizeof(int))) 346#define MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \ 347 (sizeof(memory_object_attr_info_data_t)/sizeof(int))) 348 349#define invalid_memory_object_flavor(f) \ 350 (f != 
MEMORY_OBJECT_ATTRIBUTE_INFO && \ 351 f != MEMORY_OBJECT_PERFORMANCE_INFO && \ 352 f != OLD_MEMORY_OBJECT_BEHAVIOR_INFO && \ 353 f != MEMORY_OBJECT_BEHAVIOR_INFO && \ 354 f != OLD_MEMORY_OBJECT_ATTRIBUTE_INFO) 355 356 357/* 358 * Used to support options on memory_object_release_name call 359 */ 360#define MEMORY_OBJECT_TERMINATE_IDLE 0x1 361#define MEMORY_OBJECT_RESPECT_CACHE 0x2 362#define MEMORY_OBJECT_RELEASE_NO_OP 0x4 363 364 365/* named entry processor mapping options */ 366/* enumerated */ 367#define MAP_MEM_NOOP 0 368#define MAP_MEM_COPYBACK 1 369#define MAP_MEM_IO 2 370#define MAP_MEM_WTHRU 3 371#define MAP_MEM_WCOMB 4 /* Write combining mode */ 372 /* aka store gather */ 373#define MAP_MEM_INNERWBACK 5 374 375#define GET_MAP_MEM(flags) \ 376 ((((unsigned int)(flags)) >> 24) & 0xFF) 377 378#define SET_MAP_MEM(caching, flags) \ 379 ((flags) = ((((unsigned int)(caching)) << 24) \ 380 & 0xFF000000) | ((flags) & 0xFFFFFF)); 381 382/* leave room for vm_prot bits */ 383#define MAP_MEM_ONLY 0x010000 /* change processor caching */ 384#define MAP_MEM_NAMED_CREATE 0x020000 /* create extant object */ 385#define MAP_MEM_PURGABLE 0x040000 /* create a purgable VM object */ 386#define MAP_MEM_NAMED_REUSE 0x080000 /* reuse provided entry if identical */ 387 388#ifdef KERNEL 389 390/* 391 * Universal Page List data structures 392 * 393 * A UPL describes a bounded set of physical pages 394 * associated with some range of an object or map 395 * and a snapshot of the attributes associated with 396 * each of those pages. 
 */
#ifdef PRIVATE
#define MAX_UPL_TRANSFER	256
#define MAX_UPL_SIZE		8192

/*
 * Per-page snapshot within a UPL.  Only XNU-private code sees the
 * bitfield layout; everyone else must use the upl_page_xxx()
 * accessor functions / macros.
 */
struct upl_page_info {
	ppnum_t		phys_addr;	/* physical page index number */
	unsigned int
#ifdef	XNU_KERNEL_PRIVATE
			pageout:1,	/* page is to be removed on commit */
			absent:1,	/* No valid data in this page */
			dirty:1,	/* Page must be cleaned (O) */
			precious:1,	/* must be cleaned, we have only copy */
			device:1,	/* no page data, mapped dev memory */
			speculative:1,	/* page is valid, but not yet accessed */
			cs_validated:1,	/* CODE SIGNING: page was validated */
			cs_tainted:1,	/* CODE SIGNING: page is tainted */
			needed:1,	/* page should be left in cache on abort */
			:0;		/* force to long boundary */
#else
			opaque;		/* use upl_page_xxx() accessor funcs */
#endif /* XNU_KERNEL_PRIVATE */
};

#else

/* opaque two-word layout exposed outside PRIVATE builds */
struct upl_page_info {
	unsigned int	opaque[2];	/* use upl_page_xxx() accessor funcs */
};

#endif /* PRIVATE */

typedef struct upl_page_info	upl_page_info_t;
typedef upl_page_info_t		*upl_page_info_array_t;
typedef upl_page_info_array_t	upl_page_list_ptr_t;

typedef uint32_t	upl_offset_t;	/* page-aligned byte offset */
typedef uint32_t	upl_size_t;	/* page-aligned byte size */

/* upl invocation flags */
/* top nibble is used by super upl */

#define UPL_FLAGS_NONE		0x00000000
#define UPL_COPYOUT_FROM	0x00000001
#define UPL_PRECIOUS		0x00000002
#define UPL_NO_SYNC		0x00000004
#define UPL_CLEAN_IN_PLACE	0x00000008
#define UPL_NOBLOCK		0x00000010
#define UPL_RET_ONLY_DIRTY	0x00000020
#define UPL_SET_INTERNAL	0x00000040
#define UPL_QUERY_OBJECT_TYPE	0x00000080
#define UPL_RET_ONLY_ABSENT	0x00000100	/* used only for COPY_FROM = FALSE */
#define UPL_FILE_IO		0x00000200
#define UPL_SET_LITE		0x00000400
#define UPL_SET_INTERRUPTIBLE	0x00000800
#define UPL_SET_IO_WIRE		0x00001000
#define UPL_FOR_PAGEOUT		0x00002000
#define UPL_WILL_BE_DUMPED	0x00004000
#define UPL_FORCE_DATA_SYNC	0x00008000
/* continued after the ticket bits... */

#define UPL_PAGE_TICKET_MASK	0x000F0000
#define UPL_PAGE_TICKET_SHIFT	16

/* ... flags resume here */
#define UPL_BLOCK_ACCESS	0x00100000
#define UPL_ENCRYPT		0x00200000
#define UPL_NOZEROFILL		0x00400000
#define UPL_WILL_MODIFY		0x00800000	/* caller will modify the pages */

#define UPL_NEED_32BIT_ADDR	0x01000000
#define UPL_UBC_MSYNC		0x02000000
#define UPL_UBC_PAGEOUT		0x04000000
#define UPL_UBC_PAGEIN		0x08000000
#define UPL_REQUEST_SET_DIRTY	0x10000000

/* UPL flags known by this kernel */
#define	UPL_VALID_FLAGS		0x1FFFFFFF


/* upl abort error flags */
#define UPL_ABORT_RESTART	0x1
#define UPL_ABORT_UNAVAILABLE	0x2
#define UPL_ABORT_ERROR		0x4
#define UPL_ABORT_FREE_ON_EMPTY	0x8	/* only implemented in wrappers */
#define UPL_ABORT_DUMP_PAGES	0x10
#define UPL_ABORT_NOTIFY_EMPTY	0x20
/* deprecated: #define UPL_ABORT_ALLOW_ACCESS	0x40 */
#define UPL_ABORT_REFERENCE	0x80

/* upl pages check flags */
#define UPL_CHECK_DIRTY		0x1


/*
 * upl pagein/pageout flags
 *
 *
 * when I/O is issued from this UPL it should be done synchronously
 */
#define UPL_IOSYNC	0x1

/*
 * the passed in UPL should not have either a commit or abort
 * applied to it by the underlying layers... the site that
 * created the UPL is responsible for cleaning it up.
 */
#define UPL_NOCOMMIT	0x2

/*
 * turn off any speculative read-ahead applied at the I/O layer
 */
#define UPL_NORDAHEAD	0x4

/*
 * pageout request is targeting a real file
 * as opposed to a swap file.
 */

#define UPL_VNODE_PAGER	0x8
/*
 * this pageout is being originated as part of an explicit
 * memory synchronization operation... no speculative clustering
 * should be applied, only the range specified should be pushed.
 */
#define UPL_MSYNC		0x10

/*
 * NOTE(review): original comment was empty; name suggests the paging
 * I/O for this UPL is encrypted — confirm against callers.
 */
#define UPL_PAGING_ENCRYPTED	0x20

/*
 * this pageout is being originated as part of an explicit
 * memory synchronization operation that is checking for I/O
 * errors and taking its own action... if an error occurs,
 * just abort the pages back into the cache unchanged
 */
#define UPL_KEEPCACHED		0x40

/*
 * this pageout originated from within cluster_io to deal
 * with a dirty page that hasn't yet been seen by the FS
 * that backs it... tag it so that the FS can take the
 * appropriate action w/r to its locking model since the
 * pageout will reenter the FS for the same file currently
 * being handled in this context.
 */
#define UPL_NESTED_PAGEOUT	0x80

/*
 * we've detected a sequential access pattern and
 * we are speculatively and aggressively pulling
 * pages in... do not count these as real PAGEINs
 * w/r to our hard throttle maintenance
 */
#define UPL_IOSTREAMING		0x100




/* upl commit flags */
#define UPL_COMMIT_FREE_ON_EMPTY	0x1	/* only implemented in wrappers */
#define UPL_COMMIT_CLEAR_DIRTY		0x2
#define UPL_COMMIT_SET_DIRTY		0x4
#define UPL_COMMIT_INACTIVATE		0x8
#define UPL_COMMIT_NOTIFY_EMPTY		0x10
/* deprecated: #define UPL_COMMIT_ALLOW_ACCESS	0x20 */
#define UPL_COMMIT_CS_VALIDATED		0x40
#define UPL_COMMIT_CLEAR_PRECIOUS	0x80
#define UPL_COMMIT_SPECULATE		0x100
#define UPL_COMMIT_FREE_ABSENT		0x200

#define UPL_COMMIT_KERNEL_ONLY_FLAGS	(UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_FREE_ABSENT)

/* flags for return of state from vm_map_get_upl, vm_upl address space */
/* based call */
#define UPL_DEV_MEMORY		0x1
#define UPL_PHYS_CONTIG	0x2


/*
 * Flags for the UPL page ops routine.  This routine is not exported
 * out of the kernel at the moment and so the defs live here.
 */
#define UPL_POP_DIRTY		0x1
#define UPL_POP_PAGEOUT		0x2
#define UPL_POP_PRECIOUS	0x4
#define UPL_POP_ABSENT		0x8
#define UPL_POP_BUSY		0x10

#define UPL_POP_PHYSICAL	0x10000000
#define UPL_POP_DUMP		0x20000000
#define UPL_POP_SET		0x40000000
#define UPL_POP_CLR		0x80000000

/*
 * Flags for the UPL range op routine.  This routine is not exported
 * out of the kernel at the moment and so the defs live here.
 */
/*
 * UPL_ROP_ABSENT: Returns the extent of the range presented which
 * is absent, starting with the start address presented
 */
#define UPL_ROP_ABSENT		0x01
/*
 * UPL_ROP_PRESENT: Returns the extent of the range presented which
 * is present (i.e. resident), starting with the start address presented
 */
#define UPL_ROP_PRESENT		0x02
/*
 * UPL_ROP_DUMP: Dump the pages which are found in the target object
 * for the target range.
 */
#define UPL_ROP_DUMP		0x04

#ifdef	PRIVATE

/*
 * access macros for upl_t
 *
 * All of the per-page predicates below treat a zero phys_addr as
 * "no page here" and answer FALSE in that case.
 */

#define UPL_DEVICE_PAGE(upl) \
	(((upl)[0].phys_addr != 0) ? ((upl)[0].device) : FALSE)

#define UPL_PAGE_PRESENT(upl, index) \
	((upl)[(index)].phys_addr != 0)

#define UPL_PHYS_PAGE(upl, index) \
	((upl)[(index)].phys_addr)

#define UPL_SPECULATIVE_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].speculative) : FALSE)

#define UPL_DIRTY_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].dirty) : FALSE)

#define UPL_PRECIOUS_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].precious) : FALSE)

#define UPL_VALID_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? (!((upl)[(index)].absent)) : FALSE)

#define UPL_PAGEOUT_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].pageout) : FALSE)

/* set/clear the pageout ("free on commit") bit; no-op on absent pages */
#define UPL_SET_PAGE_FREE_ON_COMMIT(upl, index) \
	(((upl)[(index)].phys_addr != 0) ?	\
	 ((upl)[(index)].pageout = TRUE) : FALSE)

#define UPL_CLR_PAGE_FREE_ON_COMMIT(upl, index) \
	(((upl)[(index)].phys_addr != 0) ?	\
	 ((upl)[(index)].pageout = FALSE) : FALSE)

/* modifier macros for upl_t */

#define UPL_SET_CS_VALIDATED(upl, index, value) \
	((upl)[(index)].cs_validated = ((value) ? TRUE : FALSE))

#define UPL_SET_CS_TAINTED(upl, index, value) \
	((upl)[(index)].cs_tainted = ((value) ? TRUE : FALSE))

/* The call prototyped below is used strictly by UPL_GET_INTERNAL_PAGE_LIST */

extern vm_size_t	upl_offset_to_pagelist;
extern vm_size_t	upl_get_internal_pagelist_offset(void);
extern void*		upl_get_internal_vectorupl(upl_t);
extern upl_page_info_t*	upl_get_internal_vectorupl_pagelist(upl_t);

/* Use this variant to get the UPL's page list iff: */
/* - the upl being passed in is already part of a vector UPL */
/* - the page list you want is that of this "sub-upl" and not that of the entire vector-upl */

#define UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl) \
	((upl_page_info_t *)((upl_offset_to_pagelist == 0) ? \
	(uintptr_t)upl + (unsigned int)(upl_offset_to_pagelist = upl_get_internal_pagelist_offset()): \
	(uintptr_t)upl + (unsigned int)upl_offset_to_pagelist))

/* UPL_GET_INTERNAL_PAGE_LIST is only valid on internal objects where the */
/* list request was made with the UPL_INTERNAL flag */


#define UPL_GET_INTERNAL_PAGE_LIST(upl) \
	((upl_get_internal_vectorupl(upl) != NULL ) ? (upl_get_internal_vectorupl_pagelist(upl)) : \
	((upl_page_info_t *)((upl_offset_to_pagelist == 0) ? \
	(uintptr_t)upl + (unsigned int)(upl_offset_to_pagelist = upl_get_internal_pagelist_offset()): \
	(uintptr_t)upl + (unsigned int)upl_offset_to_pagelist)))

__BEGIN_DECLS

extern ppnum_t	upl_phys_page(upl_page_info_t *upl, int index);
extern boolean_t	upl_device_page(upl_page_info_t *upl);
extern boolean_t	upl_speculative_page(upl_page_info_t *upl, int index);
extern void	upl_clear_dirty(upl_t upl, boolean_t value);
extern void	upl_set_referenced(upl_t upl, boolean_t value);
extern void	upl_range_needed(upl_t upl, int index, int count);

__END_DECLS

#endif /* PRIVATE */

__BEGIN_DECLS

extern boolean_t	upl_page_present(upl_page_info_t *upl, int index);
extern boolean_t	upl_dirty_page(upl_page_info_t *upl, int index);
extern boolean_t	upl_valid_page(upl_page_info_t *upl, int index);
extern void		upl_deallocate(upl_t upl);

__END_DECLS

#endif	/* KERNEL */

#endif	/* _MACH_MEMORY_OBJECT_TYPES_H_ */