1/*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49 *  School of Computer Science
50 *  Carnegie Mellon University
51 *  Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 *	File:	memory_object.h
60 *	Author:	Michael Wayne Young
61 *
62 *	External memory management interface definition.
63 */
64
65#ifndef	_MACH_MEMORY_OBJECT_TYPES_H_
66#define _MACH_MEMORY_OBJECT_TYPES_H_
67
68/*
69 *	User-visible types used in the external memory
70 *	management interface:
71 */
72
73#include <mach/port.h>
74#include <mach/message.h>
75#include <mach/vm_prot.h>
76#include <mach/vm_sync.h>
77#include <mach/vm_types.h>
78#include <mach/machine/vm_types.h>
79
80#include <sys/cdefs.h>
81
82#define VM_64_BIT_DATA_OBJECTS
83
typedef unsigned long long	memory_object_offset_t;	/* 64-bit byte offset within a memory object */
typedef unsigned long long	memory_object_size_t;	/* 64-bit byte length of a memory object range */
typedef natural_t		memory_object_cluster_size_t;	/* paging cluster size, in bytes */
typedef natural_t *		memory_object_fault_info_t;	/* opaque fault-info handle passed through to pagers */

typedef unsigned long long 	vm_object_id_t;	/* identifier for a VM object */
90
91
92/*
93 * Temporary until real EMMI version gets re-implemented
94 */
95
96#ifdef	KERNEL_PRIVATE
97
98struct memory_object_pager_ops;	/* forward declaration */
99
/*
 * In-kernel representation of a memory object (pager).  The leading
 * pad fields mirror struct ipc_object_header (per the field comments),
 * so the structure can overlay the port representation; mo_pager_ops
 * points at the pager's operations vector.
 */
typedef struct 		memory_object {
	unsigned int	_pad1; /* struct ipc_object_header */
#ifdef __LP64__
	unsigned int	_pad2; /* pad to natural boundary */
#endif
	const struct memory_object_pager_ops	*mo_pager_ops; /* pager operations vector */
} *memory_object_t;
107
/*
 * In-kernel control handle for a memory object.  The leading field
 * mirrors struct ipc_object_header (per the field comment);
 * moc_object points at the VM object the control refers to.
 */
typedef struct		memory_object_control {
	unsigned int	moc_ikot; /* struct ipc_object_header */
#ifdef __LP64__
	unsigned int	_pad; /* pad to natural boundary */
#endif
	struct vm_object *moc_object; /* the controlled VM object */
} *memory_object_control_t;
115
/*
 * Operations vector implemented by each in-kernel pager.  A pager
 * exports one const instance of this structure; the mo_pager_ops
 * field of struct memory_object points at it.
 */
typedef const struct memory_object_pager_ops {
	/* add a reference to the pager */
	void (*memory_object_reference)(
		memory_object_t mem_obj);
	/* drop a reference to the pager */
	void (*memory_object_deallocate)(
		memory_object_t mem_obj);
	/* associate the pager with its control handle */
	kern_return_t (*memory_object_init)(
		memory_object_t mem_obj,
		memory_object_control_t mem_control,
		memory_object_cluster_size_t size);
	/* shut the pager down */
	kern_return_t (*memory_object_terminate)(
		memory_object_t mem_obj);
	/* request data for [offset, offset+length) with desired_access */
	kern_return_t (*memory_object_data_request)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t length,
		vm_prot_t desired_access,
		memory_object_fault_info_t fault_info);
	/* return (page out) data; residual offset and I/O error reported
	 * through resid_offset / io_error */
	kern_return_t (*memory_object_data_return)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t size,
		memory_object_offset_t *resid_offset,
		int *io_error,
		boolean_t dirty,
		boolean_t kernel_copy,
		int upl_flags);
	/* supply initial data for a range */
	kern_return_t (*memory_object_data_initialize)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t size);
	/* unlock a range for the given desired_access */
	kern_return_t (*memory_object_data_unlock)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_size_t size,
		vm_prot_t desired_access);
	/* synchronize a range per vm_sync_t flags (msync-style) */
	kern_return_t (*memory_object_synchronize)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_size_t size,
		vm_sync_t sync_flags);
	/* notification that the object is being mapped with 'prot' */
	kern_return_t (*memory_object_map)(
		memory_object_t mem_obj,
		vm_prot_t prot);
	/* notification that the last mapping has gone away */
	kern_return_t (*memory_object_last_unmap)(
		memory_object_t mem_obj);
	/* reclaim resources; optionally the backing store as well */
	kern_return_t (*memory_object_data_reclaim)(
		memory_object_t mem_obj,
		boolean_t reclaim_backing_store);
	const char *memory_object_pager_name;	/* human-readable pager name */
} * memory_object_pager_ops_t;
166
167#else	/* KERNEL_PRIVATE */
168
169typedef mach_port_t	memory_object_t;
170typedef mach_port_t	memory_object_control_t;
171
172#endif	/* KERNEL_PRIVATE */
173
174typedef memory_object_t *memory_object_array_t;
175					/* A memory object ... */
176					/*  Used by the kernel to retrieve */
177					/*  or store data */
178
179typedef	mach_port_t	memory_object_name_t;
180					/* Used to describe the memory ... */
181					/*  object in vm_regions() calls */
182
183typedef mach_port_t	memory_object_default_t;
184					/* Registered with the host ... */
185					/*  for creating new internal objects */
186
187#define MEMORY_OBJECT_NULL		((memory_object_t) 0)
188#define MEMORY_OBJECT_CONTROL_NULL	((memory_object_control_t) 0)
189#define MEMORY_OBJECT_NAME_NULL		((memory_object_name_t) 0)
190#define MEMORY_OBJECT_DEFAULT_NULL	((memory_object_default_t) 0)
191
192
193typedef	int		memory_object_copy_strategy_t;
194					/* How memory manager handles copy: */
195#define		MEMORY_OBJECT_COPY_NONE		0
196					/* ... No special support */
197#define		MEMORY_OBJECT_COPY_CALL		1
198					/* ... Make call on memory manager */
199#define		MEMORY_OBJECT_COPY_DELAY 	2
200					/* ... Memory manager doesn't
201					 *     change data externally.
202					 */
203#define		MEMORY_OBJECT_COPY_TEMPORARY 	3
204					/* ... Memory manager doesn't
205					 *     change data externally, and
206					 *     doesn't need to see changes.
207					 */
208#define		MEMORY_OBJECT_COPY_SYMMETRIC 	4
209					/* ... Memory manager doesn't
210					 *     change data externally,
211					 *     doesn't need to see changes,
212					 *     and object will not be
213					 *     multiply mapped.
214					 *
215					 *     XXX
216					 *     Not yet safe for non-kernel use.
217					 */
218
219#define		MEMORY_OBJECT_COPY_INVALID	5
220					/* ...	An invalid copy strategy,
221					 *	for external objects which
222					 *	have not been initialized.
223					 *	Allows copy_strategy to be
224					 *	examined without also
225					 *	examining pager_ready and
226					 *	internal.
227					 */
228
229typedef	int		memory_object_return_t;
230					/* Which pages to return to manager
231					   this time (lock_request) */
232#define		MEMORY_OBJECT_RETURN_NONE	0
233					/* ... don't return any. */
234#define		MEMORY_OBJECT_RETURN_DIRTY	1
235					/* ... only dirty pages. */
236#define		MEMORY_OBJECT_RETURN_ALL	2
237					/* ... dirty and precious pages. */
238#define		MEMORY_OBJECT_RETURN_ANYTHING	3
239					/* ... any resident page. */
240
241/*
242 *	Data lock request flags
243 */
244
245#define		MEMORY_OBJECT_DATA_FLUSH 	0x1
246#define		MEMORY_OBJECT_DATA_NO_CHANGE	0x2
247#define		MEMORY_OBJECT_DATA_PURGE	0x4
248#define		MEMORY_OBJECT_COPY_SYNC		0x8
249#define		MEMORY_OBJECT_DATA_SYNC		0x10
250#define         MEMORY_OBJECT_IO_SYNC           0x20
251#define		MEMORY_OBJECT_DATA_FLUSH_ALL	0x40
252
253/*
254 *	Types for the memory object flavor interfaces
255 */
256
257#define MEMORY_OBJECT_INFO_MAX      (1024)
258typedef int     *memory_object_info_t;
259typedef int	 memory_object_flavor_t;
260typedef int      memory_object_info_data_t[MEMORY_OBJECT_INFO_MAX];
261
262
263#define MEMORY_OBJECT_PERFORMANCE_INFO	11
264#define MEMORY_OBJECT_ATTRIBUTE_INFO	14
265#define MEMORY_OBJECT_BEHAVIOR_INFO 	15
266
267#ifdef	PRIVATE
268
269#define OLD_MEMORY_OBJECT_BEHAVIOR_INFO 	10
270#define OLD_MEMORY_OBJECT_ATTRIBUTE_INFO	12
271
/* Old-style behavior attributes (flavor OLD_MEMORY_OBJECT_BEHAVIOR_INFO). */
struct old_memory_object_behave_info {
	memory_object_copy_strategy_t	copy_strategy;	/* a MEMORY_OBJECT_COPY_* value */
	boolean_t			temporary;
	boolean_t			invalidate;
};
277
/* Old-style attributes (flavor OLD_MEMORY_OBJECT_ATTRIBUTE_INFO). */
struct old_memory_object_attr_info {			/* old attr list */
        boolean_t       		object_ready;
        boolean_t       		may_cache;
        memory_object_copy_strategy_t 	copy_strategy;	/* a MEMORY_OBJECT_COPY_* value */
};
283
284typedef struct old_memory_object_behave_info *old_memory_object_behave_info_t;
285typedef struct old_memory_object_behave_info old_memory_object_behave_info_data_t;
286typedef struct old_memory_object_attr_info *old_memory_object_attr_info_t;
287typedef struct old_memory_object_attr_info old_memory_object_attr_info_data_t;
288
289#define OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT   	((mach_msg_type_number_t) \
290                (sizeof(old_memory_object_behave_info_data_t)/sizeof(int)))
291#define OLD_MEMORY_OBJECT_ATTR_INFO_COUNT	((mach_msg_type_number_t) \
292		(sizeof(old_memory_object_attr_info_data_t)/sizeof(int)))
293
294#ifdef KERNEL
295
296__BEGIN_DECLS
297extern void memory_object_reference(memory_object_t object);
298extern void memory_object_deallocate(memory_object_t object);
299
300extern void memory_object_default_reference(memory_object_default_t);
301extern void memory_object_default_deallocate(memory_object_default_t);
302
303extern void memory_object_control_reference(memory_object_control_t control);
304extern void memory_object_control_deallocate(memory_object_control_t control);
305extern int  memory_object_control_uiomove(memory_object_control_t, memory_object_offset_t, void *, int, int, int, int);
306__END_DECLS
307
308#endif  /* KERNEL */
309
310#endif	/* PRIVATE */
311
/* Payload for the MEMORY_OBJECT_PERFORMANCE_INFO flavor. */
struct memory_object_perf_info {
	memory_object_cluster_size_t	cluster_size;	/* paging cluster size, bytes */
	boolean_t			may_cache;	/* object may be cached */
};
316
/* Payload for the MEMORY_OBJECT_ATTRIBUTE_INFO flavor. */
struct memory_object_attr_info {
	memory_object_copy_strategy_t	copy_strategy;	/* a MEMORY_OBJECT_COPY_* value */
	memory_object_cluster_size_t	cluster_size;	/* paging cluster size, bytes */
	boolean_t			may_cache_object;
	boolean_t			temporary;
};
323
/* Payload for the MEMORY_OBJECT_BEHAVIOR_INFO flavor. */
struct memory_object_behave_info {
	memory_object_copy_strategy_t	copy_strategy;	/* a MEMORY_OBJECT_COPY_* value */
	boolean_t			temporary;
	boolean_t			invalidate;
	boolean_t			silent_overwrite;
	boolean_t			advisory_pageout;
};
331
332
333typedef struct memory_object_behave_info *memory_object_behave_info_t;
334typedef struct memory_object_behave_info memory_object_behave_info_data_t;
335
336typedef struct memory_object_perf_info 	*memory_object_perf_info_t;
337typedef struct memory_object_perf_info	memory_object_perf_info_data_t;
338
339typedef struct memory_object_attr_info	*memory_object_attr_info_t;
340typedef struct memory_object_attr_info	memory_object_attr_info_data_t;
341
342#define MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t)	\
343                (sizeof(memory_object_behave_info_data_t)/sizeof(int)))
344#define MEMORY_OBJECT_PERF_INFO_COUNT	((mach_msg_type_number_t)	\
345		(sizeof(memory_object_perf_info_data_t)/sizeof(int)))
346#define MEMORY_OBJECT_ATTR_INFO_COUNT	((mach_msg_type_number_t)	\
347		(sizeof(memory_object_attr_info_data_t)/sizeof(int)))
348
/*
 * TRUE when 'f' is not one of the recognized memory object info
 * flavors.  The argument is parenthesized at every use so that an
 * expression argument (e.g. "x ? a : b") expands correctly.
 * NOTE(review): the OLD_* flavors referenced here are only defined
 * under PRIVATE (above) -- presumably this macro is only expanded in
 * kernel code compiled with PRIVATE; verify before using elsewhere.
 */
#define invalid_memory_object_flavor(f)					\
	((f) != MEMORY_OBJECT_ATTRIBUTE_INFO && 			\
	 (f) != MEMORY_OBJECT_PERFORMANCE_INFO && 			\
	 (f) != OLD_MEMORY_OBJECT_BEHAVIOR_INFO &&			\
	 (f) != MEMORY_OBJECT_BEHAVIOR_INFO &&				\
	 (f) != OLD_MEMORY_OBJECT_ATTRIBUTE_INFO)
355
356
357/*
358 * Used to support options on memory_object_release_name call
359 */
360#define MEMORY_OBJECT_TERMINATE_IDLE	0x1
361#define MEMORY_OBJECT_RESPECT_CACHE	0x2
362#define MEMORY_OBJECT_RELEASE_NO_OP	0x4
363
364
/* named entry processor mapping options */
/* enumerated caching modes, stored in the top 8 bits of the flags word */
#define MAP_MEM_NOOP		0
#define MAP_MEM_COPYBACK	1
#define MAP_MEM_IO		2
#define MAP_MEM_WTHRU		3
#define MAP_MEM_WCOMB		4	/* Write combining mode */
					/* aka store gather     */
#define MAP_MEM_INNERWBACK	5

/* extract the MAP_MEM_* caching mode from bits 24..31 of 'flags' */
#define GET_MAP_MEM(flags)	\
	((((unsigned int)(flags)) >> 24) & 0xFF)

/*
 * Store 'caching' (a MAP_MEM_* value) into bits 24..31 of 'flags',
 * preserving the low 24 bits.  The expansion is a single parenthesized
 * assignment expression with no trailing semicolon, so the macro is
 * safe as the body of a braceless if/else (the previous definition
 * ended in ';', which broke such uses).
 */
#define SET_MAP_MEM(caching, flags)	\
	((flags) = ((((unsigned int)(caching)) << 24) \
			& 0xFF000000) | ((flags) & 0xFFFFFF))

/* leave room for vm_prot bits */
#define MAP_MEM_ONLY		0x010000 /* change processor caching  */
#define MAP_MEM_NAMED_CREATE	0x020000 /* create extant object      */
#define MAP_MEM_PURGABLE	0x040000 /* create a purgable VM object */
#define MAP_MEM_NAMED_REUSE	0x080000 /* reuse provided entry if identical */
#define MAP_MEM_USE_DATA_ADDR	0x100000 /* preserve address of data, rather than base of page */
#define MAP_MEM_VM_COPY		0x200000 /* make a copy of a VM range */
#define MAP_MEM_VM_SHARE	0x400000 /* extract a VM range for remap */
390
391#ifdef KERNEL
392
393/*
394 *  Universal Page List data structures
395 *
396 *  A UPL describes a bounded set of physical pages
397 *  associated with some range of an object or map
398 *  and a snapshot of the attributes associated with
399 *  each of those pages.
400 */
401#ifdef PRIVATE
402#define MAX_UPL_TRANSFER_BYTES	(1024 * 1024)
403#define MAX_UPL_SIZE_BYTES	(1024 * 1024 * 64)
404
405#define MAX_UPL_SIZE		(MAX_UPL_SIZE_BYTES / PAGE_SIZE)
406#define	MAX_UPL_TRANSFER	(MAX_UPL_TRANSFER_BYTES / PAGE_SIZE)
407
408
/*
 * Per-page snapshot within a UPL.  A phys_addr of 0 marks a slot with
 * no page (the UPL_*_PAGE() accessor macros below all test it first).
 * The bitfield layout is kernel-private; external consumers see only
 * the opaque word and must use the upl_page_xxx() accessors.
 */
struct upl_page_info {
	ppnum_t		phys_addr;	/* physical page index number */
	unsigned int
#ifdef  XNU_KERNEL_PRIVATE
		pageout:1,      /* page is to be removed on commit */
		absent:1,       /* No valid data in this page */
		dirty:1,        /* Page must be cleaned (O) */
		precious:1,     /* must be cleaned, we have only copy */
		device:1,	/* no page data, mapped dev memory */
		speculative:1,  /* page is valid, but not yet accessed */
		cs_validated:1,	/* CODE SIGNING: page was validated */
		cs_tainted:1,	/* CODE SIGNING: page is tainted */
		needed:1,	/* page should be left in cache on abort */
		:0;		/* force to long boundary */
#else
		opaque;		/* use upl_page_xxx() accessor funcs */
#endif /* XNU_KERNEL_PRIVATE */
};
427
428#else
429
/* Non-PRIVATE view: opaque two-word placeholder for the same structure. */
struct upl_page_info {
	unsigned int	opaque[2];	/* use upl_page_xxx() accessor funcs */
};
433
434#endif /* PRIVATE */
435
436typedef struct upl_page_info	upl_page_info_t;
437typedef upl_page_info_t		*upl_page_info_array_t;
438typedef upl_page_info_array_t	upl_page_list_ptr_t;
439
440typedef uint32_t	upl_offset_t;	/* page-aligned byte offset */
441typedef uint32_t	upl_size_t;	/* page-aligned byte size */
442
443/* upl invocation flags */
444/* top nibble is used by super upl */
445
446#define UPL_FLAGS_NONE		0x00000000
447#define UPL_COPYOUT_FROM	0x00000001
448#define UPL_PRECIOUS		0x00000002
449#define UPL_NO_SYNC		0x00000004
450#define UPL_CLEAN_IN_PLACE	0x00000008
451#define UPL_NOBLOCK		0x00000010
452#define UPL_RET_ONLY_DIRTY	0x00000020
453#define UPL_SET_INTERNAL	0x00000040
454#define UPL_QUERY_OBJECT_TYPE	0x00000080
455#define UPL_RET_ONLY_ABSENT	0x00000100 /* used only for COPY_FROM = FALSE */
456#define UPL_FILE_IO             0x00000200
457#define UPL_SET_LITE		0x00000400
458#define UPL_SET_INTERRUPTIBLE	0x00000800
459#define UPL_SET_IO_WIRE		0x00001000
460#define UPL_FOR_PAGEOUT		0x00002000
461#define UPL_WILL_BE_DUMPED      0x00004000
462#define UPL_FORCE_DATA_SYNC	0x00008000
463/* continued after the ticket bits... */
464
465#define UPL_PAGE_TICKET_MASK	0x000F0000
466#define UPL_PAGE_TICKET_SHIFT   16
467
468/* ... flags resume here */
469#define UPL_BLOCK_ACCESS	0x00100000
470#define UPL_ENCRYPT		0x00200000
471#define UPL_NOZEROFILL		0x00400000
472#define UPL_WILL_MODIFY		0x00800000 /* caller will modify the pages */
473
474#define UPL_NEED_32BIT_ADDR	0x01000000
475#define UPL_UBC_MSYNC		0x02000000
476#define UPL_UBC_PAGEOUT		0x04000000
477#define UPL_UBC_PAGEIN		0x08000000
478#define UPL_REQUEST_SET_DIRTY	0x10000000
479#define UPL_REQUEST_NO_FAULT	0x20000000 /* fail if pages not all resident */
480#define UPL_NOZEROFILLIO	0x40000000 /* allow non zerofill pages present */
481#define UPL_REQUEST_FORCE_COHERENCY	0x80000000
482
483/* UPL flags known by this kernel */
484#define UPL_VALID_FLAGS		0xFFFFFFFF
485
486
487/* upl abort error flags */
488#define UPL_ABORT_RESTART		0x1
489#define UPL_ABORT_UNAVAILABLE	0x2
490#define UPL_ABORT_ERROR		0x4
491#define UPL_ABORT_FREE_ON_EMPTY	0x8  /* only implemented in wrappers */
492#define UPL_ABORT_DUMP_PAGES	0x10
493#define UPL_ABORT_NOTIFY_EMPTY	0x20
494/* deprecated: #define UPL_ABORT_ALLOW_ACCESS	0x40 */
495#define UPL_ABORT_REFERENCE	0x80
496
497/* upl pages check flags */
498#define UPL_CHECK_DIRTY         0x1
499
500
501/*
502 *  upl pagein/pageout  flags
503 *
504 *
505 * when I/O is issued from this UPL it should be done synchronously
506 */
507#define UPL_IOSYNC	0x1
508
509/*
510 * the passed in UPL should not have either a commit or abort
511 * applied to it by the underlying layers... the site that
512 * created the UPL is responsible for cleaning it up.
513 */
514#define UPL_NOCOMMIT	0x2
515
516/*
517 * turn off any speculative read-ahead applied at the I/O layer
518 */
519#define UPL_NORDAHEAD	0x4
520
521/*
522 * pageout request is targeting a real file
523 * as opposed to a swap file.
524 */
525
526#define UPL_VNODE_PAGER	0x8
527/*
528 * this pageout is being originated as part of an explicit
529 * memory synchronization operation... no speculative clustering
530 * should be applied, only the range specified should be pushed.
531 */
532#define UPL_MSYNC		0x10
533
534/*
535 *
536 */
537#define UPL_PAGING_ENCRYPTED	0x20
538
539/*
540 * this pageout is being originated as part of an explicit
541 * memory synchronization operation that is checking for I/O
 * errors and taking its own action... if an error occurs,
543 * just abort the pages back into the cache unchanged
544 */
545#define UPL_KEEPCACHED		0x40
546
547/*
548 * this pageout originated from within cluster_io to deal
549 * with a dirty page that hasn't yet been seen by the FS
550 * that backs it... tag it so that the FS can take the
551 * appropriate action w/r to its locking model since the
552 * pageout will reenter the FS for the same file currently
553 * being handled in this context.
554 */
555#define UPL_NESTED_PAGEOUT	0x80
556
557/*
558 * we've detected a sequential access pattern and
559 * we are speculatively and aggressively pulling
560 * pages in... do not count these as real PAGEINs
561 * w/r to our hard throttle maintenance
562 */
563#define UPL_IOSTREAMING		0x100
564
565/*
566 * Currently, it's only used for the swap pagein path.
567 * Since the swap + compressed pager layer manage their
568 * pages, these pages are not marked "absent" i.e. these
569 * are "valid" pages. The pagein path will _not_ issue an
570 * I/O (correctly) for valid pages. So, this flag is used
571 * to override that logic in the vnode I/O path.
572 */
573#define UPL_IGNORE_VALID_PAGE_CHECK	0x200
574
575
576
577/* upl commit flags */
578#define UPL_COMMIT_FREE_ON_EMPTY	0x1 /* only implemented in wrappers */
579#define UPL_COMMIT_CLEAR_DIRTY		0x2
580#define UPL_COMMIT_SET_DIRTY		0x4
581#define UPL_COMMIT_INACTIVATE		0x8
582#define UPL_COMMIT_NOTIFY_EMPTY		0x10
583/* deprecated: #define UPL_COMMIT_ALLOW_ACCESS		0x20 */
584#define UPL_COMMIT_CS_VALIDATED		0x40
585#define UPL_COMMIT_CLEAR_PRECIOUS	0x80
586#define UPL_COMMIT_SPECULATE		0x100
587#define UPL_COMMIT_FREE_ABSENT		0x200
588#define UPL_COMMIT_WRITTEN_BY_KERNEL	0x400
589
590#define UPL_COMMIT_KERNEL_ONLY_FLAGS	(UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_FREE_ABSENT)
591
592/* flags for return of state from vm_map_get_upl,  vm_upl address space */
593/* based call */
594#define UPL_DEV_MEMORY			0x1
595#define UPL_PHYS_CONTIG			0x2
596
597
598/*
599 * Flags for the UPL page ops routine.  This routine is not exported
600 * out of the kernel at the moment and so the defs live here.
601 */
602#define UPL_POP_DIRTY		0x1
603#define UPL_POP_PAGEOUT		0x2
604#define UPL_POP_PRECIOUS		0x4
605#define UPL_POP_ABSENT		0x8
606#define UPL_POP_BUSY			0x10
607
608#define UPL_POP_PHYSICAL	0x10000000
609#define UPL_POP_DUMP		0x20000000
610#define UPL_POP_SET		0x40000000
611#define UPL_POP_CLR		0x80000000
612
613/*
614 * Flags for the UPL range op routine.  This routine is not exported
 * out of the kernel at the moment and so the defs live here.
616 */
617/*
618 * UPL_ROP_ABSENT: Returns the extent of the range presented which
619 * is absent, starting with the start address presented
620 */
621#define UPL_ROP_ABSENT		0x01
622/*
623 * UPL_ROP_PRESENT: Returns the extent of the range presented which
624 * is present (i.e. resident), starting with the start address presented
625 */
626#define UPL_ROP_PRESENT		0x02
627/*
628 * UPL_ROP_DUMP: Dump the pages which are found in the target object
629 * for the target range.
630 */
631#define UPL_ROP_DUMP			0x04
632
633#ifdef	PRIVATE
634
635#define UPL_REPRIO_INFO_MASK 	(0xFFFFFFFF)
636#define UPL_REPRIO_INFO_SHIFT 	32
637
638/* access macros for upl_t */
639
/*
 * Page-state accessors.  'upl' here is a pointer to an array of
 * upl_page_info entries (a page list), not an upl_t handle; every
 * accessor treats phys_addr == 0 as "no page" and yields FALSE.
 */
#define UPL_DEVICE_PAGE(upl) \
	(((upl)[0].phys_addr != 0) ? ((upl)[0].device) : FALSE)

#define UPL_PAGE_PRESENT(upl, index) \
	((upl)[(index)].phys_addr != 0)

#define UPL_PHYS_PAGE(upl, index) \
	((upl)[(index)].phys_addr)

#define UPL_SPECULATIVE_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].speculative) : FALSE)

#define UPL_DIRTY_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].dirty) : FALSE)

#define UPL_PRECIOUS_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].precious) : FALSE)

#define UPL_VALID_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? (!((upl)[(index)].absent)) : FALSE)

#define UPL_PAGEOUT_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].pageout) : FALSE)

/* set/clear the pageout bit (page freed on commit); no-op on empty slots */
#define UPL_SET_PAGE_FREE_ON_COMMIT(upl, index) \
	(((upl)[(index)].phys_addr != 0) ?	      \
	 ((upl)[(index)].pageout = TRUE) : FALSE)

#define UPL_CLR_PAGE_FREE_ON_COMMIT(upl, index) \
	(((upl)[(index)].phys_addr != 0) ?       \
	 ((upl)[(index)].pageout = FALSE) : FALSE)

/* these two take an upl_t handle and unpack blkno/len packed by
 * UPL_SET_REPRIO_INFO into upl_reprio_info[] (low/high 32 bits) */
#define UPL_REPRIO_INFO_BLKNO(upl, index) \
	(((upl)->upl_reprio_info[(index)]) & UPL_REPRIO_INFO_MASK)

#define UPL_REPRIO_INFO_LEN(upl, index) \
	((((upl)->upl_reprio_info[(index)]) >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK)
677
/* modifier macros for upl_t */

/* record the code-signing validation result for a page (normalized to TRUE/FALSE) */
#define UPL_SET_CS_VALIDATED(upl, index, value) \
	((upl)[(index)].cs_validated = ((value) ? TRUE : FALSE))

/* record the code-signing taint result for a page (normalized to TRUE/FALSE) */
#define UPL_SET_CS_TAINTED(upl, index, value) \
	((upl)[(index)].cs_tainted = ((value) ? TRUE : FALSE))
685
/*
 * Pack 'blkno' (low 32 bits) and 'len' (high 32 bits) into the UPL's
 * upl_reprio_info[] slot for 'index'; read back with
 * UPL_REPRIO_INFO_BLKNO()/UPL_REPRIO_INFO_LEN().  The whole expansion
 * is now parenthesized so the macro behaves as a single expression
 * (the previous definition left the assignment unparenthesized).
 */
#define UPL_SET_REPRIO_INFO(upl, index, blkno, len) \
	(((upl)->upl_reprio_info[(index)]) = (((uint64_t)(blkno) & UPL_REPRIO_INFO_MASK) | \
	(((uint64_t)(len) & UPL_REPRIO_INFO_MASK) << UPL_REPRIO_INFO_SHIFT)))
689
690/* The call prototyped below is used strictly by UPL_GET_INTERNAL_PAGE_LIST */
691
692extern vm_size_t	upl_offset_to_pagelist;
693extern vm_size_t 	upl_get_internal_pagelist_offset(void);
694extern void*		upl_get_internal_vectorupl(upl_t);
695extern upl_page_info_t*		upl_get_internal_vectorupl_pagelist(upl_t);
696
697/*Use this variant to get the UPL's page list iff:*/
698/*- the upl being passed in is already part of a vector UPL*/
699/*- the page list you want is that of this "sub-upl" and not that of the entire vector-upl*/
700
/*
 * Compute the address of the page list embedded in an internal UPL.
 * NOTE: both macros lazily cache the pagelist offset in the global
 * upl_offset_to_pagelist on first use (the assignment inside the
 * conditional); presumably callers serialize or racing stores write
 * the same value -- verify before using from unserialized contexts.
 */
#define UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl) \
	((upl_page_info_t *)((upl_offset_to_pagelist == 0) ?  \
	(uintptr_t)upl + (unsigned int)(upl_offset_to_pagelist = upl_get_internal_pagelist_offset()): \
	(uintptr_t)upl + (unsigned int)upl_offset_to_pagelist))

/* UPL_GET_INTERNAL_PAGE_LIST is only valid on internal objects where the */
/* list request was made with the UPL_INTERNAL flag */


/* As above, but for a UPL that may be part of a vector UPL: defer to
 * the vector UPL's page list when one exists. */
#define UPL_GET_INTERNAL_PAGE_LIST(upl) \
	((upl_get_internal_vectorupl(upl) != NULL ) ? (upl_get_internal_vectorupl_pagelist(upl)) : \
	((upl_page_info_t *)((upl_offset_to_pagelist == 0) ?  \
	(uintptr_t)upl + (unsigned int)(upl_offset_to_pagelist = upl_get_internal_pagelist_offset()): \
	(uintptr_t)upl + (unsigned int)upl_offset_to_pagelist)))
715
716__BEGIN_DECLS
717
718extern ppnum_t	upl_phys_page(upl_page_info_t *upl, int index);
719extern boolean_t	upl_device_page(upl_page_info_t *upl);
720extern boolean_t	upl_speculative_page(upl_page_info_t *upl, int index);
721extern void	upl_clear_dirty(upl_t upl, boolean_t value);
722extern void	upl_set_referenced(upl_t upl, boolean_t value);
723extern void	upl_range_needed(upl_t upl, int index, int count);
724#if CONFIG_IOSCHED
725extern int64_t upl_blkno(upl_page_info_t *upl, int index);
726extern void 	upl_set_blkno(upl_t upl, vm_offset_t upl_offset, int size, int64_t blkno);
727#endif
728
729__END_DECLS
730
731#endif /* PRIVATE */
732
733__BEGIN_DECLS
734
735extern boolean_t	upl_page_present(upl_page_info_t *upl, int index);
736extern boolean_t	upl_dirty_page(upl_page_info_t *upl, int index);
737extern boolean_t	upl_valid_page(upl_page_info_t *upl, int index);
738extern void		upl_deallocate(upl_t upl);
739extern void 		upl_mark_decmp(upl_t upl);
740extern void 		upl_unmark_decmp(upl_t upl);
741
742__END_DECLS
743
744#endif  /* KERNEL */
745
746#endif	/* _MACH_MEMORY_OBJECT_TYPES_H_ */
747