/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef	_VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <kern/clock.h>

#if	MACH_PAGEMAP
#include <vm/vm_external.h>
#endif	/* MACH_PAGEMAP */

#include <vm/vm_options.h>

struct vm_page;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int		interruptible;
	uint32_t	user_tag;
	vm_size_t	cluster_size;
	vm_behavior_t	behavior;
	vm_map_offset_t	lo_offset;
	vm_map_offset_t	hi_offset;
	unsigned int
	/* boolean_t */	no_cache:1,
	/* boolean_t */	stealth:1,
	/* boolean_t */	io_sync:1,
	/* boolean_t */ cs_bypass:1,
	/* boolean_t */	mark_zf_absent:1,
	/* boolean_t */ batch_pmap_op:1,
		__vm_object_fault_info_unused_bits:26;
};


#define	vo_size			vo_un1.vou_size
#define vo_cache_pages_to_scan	vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset	vo_un2.vou_shadow_offset
#define vo_cache_ts		vo_un2.vou_cache_ts

struct vm_object {
	queue_head_t		memq;		/* Resident memory */
	lck_rw_t		Lock;		/* Synchronization */

	union {
		vm_object_size_t  vou_size;	/* Object size (only valid if internal) */
		int		  vou_cache_pages_to_scan;	/* pages yet to be visited in an
								 * external object in cache
								 */
	} vo_un1;

	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
#if	TASK_SWAPPER
	int			res_count;	/* Residency references (swap)*/
#endif	/* TASK_SWAPPER */
	unsigned int		resident_page_count;
						/* number of resident pages */
	unsigned int		wired_page_count; /* number of wired pages */
	unsigned int		reusable_page_count;

	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */

	union {
		vm_object_offset_t vou_shadow_offset;	/* Offset into shadow */
		clock_sec_t	   vou_cache_ts;	/* age of an external object
							 * present in cache
							 */
	} vo_un2;

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */

	short			paging_in_progress;
						/* The memory object ports are
						 * being used (e.g., for pagein
						 * or pageout) -- don't change
						 * any of these fields (i.e.,
						 * don't collapse, destroy or
						 * terminate)
						 */
	short			activity_in_progress;

	unsigned int
	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below */
	/* boolean_t */	pager_created:1,	/* Has pager been created? */
	/* boolean_t */	pager_initialized:1,	/* Are fields ready to use? */
	/* boolean_t */	pager_ready:1,		/* Will pager take requests? */

	/* boolean_t */		pager_trusted:1,/* The pager for this object
						 * is trusted. This is true for
						 * all internal objects (backed
						 * by the default pager)
						 */
	/* boolean_t */		can_persist:1,	/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated?
						 */
	/* boolean_t */		internal:1,	/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager)
						 */
	/* boolean_t */		temporary:1,	/* Permanent objects may be
						 * changed externally by the
						 * memory manager, and changes
						 * made in memory must be
						 * reflected back to the memory
						 * manager.  Temporary objects
						 * lack both of these
						 * characteristics.
						 */
	/* boolean_t */		private:1,	/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */		pageout:1,	/* pageout object. contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */		alive:1,	/* Not yet terminated */

	/* boolean_t */		purgable:2,	/* Purgable state.  See
						 * VM_PURGABLE_*
						 */
	/* boolean_t */		shadowed:1,	/* Shadow may exist */
	/* boolean_t */		silent_overwrite:1,
						/* Allow full page overwrite
						 * without data_request if
						 * page is absent */
	/* boolean_t */		advisory_pageout:1,
						/* Instead of sending page
						 * via OOL, just notify
						 * pager that the kernel
						 * wants to discard it, page
						 * remains in object */
	/* boolean_t */		true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */		terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */		named:1,	/* Enforces an internal
						 * naming convention by
						 * calling the right routines
						 * for allocation and
						 * destruction; UBC references
						 * against the vm_object are
						 * checked.
						 */
	/* boolean_t */		shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero filled page.
						 */
	/* boolean_t */		phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t pmap
						 * access bits.
						 */
	/* boolean_t */		nophyscache:1;
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching. (for
						 * I/O)
						 */



	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */

	queue_head_t		msr_q;		/* memory object synchronise
						   request queue */

  /*
   * the following fields are not protected by any locks
   * they are updated via atomic compare and swap
   */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;
	uint32_t		pages_used;
#if	MACH_PAGEMAP
	vm_external_map_t	existence_map;	/* bitmap of pages written to
						 * backing storage */
#endif	/* MACH_PAGEMAP */
	vm_offset_t		cow_hint;	/* last page present in     */
						/* shadow but not in object */
#if	MACH_ASSERT
	struct vm_object	*paging_object;	/* object whose pages to be
						 * swapped out are temporarily
						 * put in the current object
						 */
#endif
	/* hold object lock when altering */
	unsigned	int
		wimg_bits:8,	        /* cache WIMG bits         */
		code_signed:1,		/* pages are signed and should be
					   validated; the signatures are stored
					   with the pager */
		hashed:1,		/* object/pager entered in hash */
		transposed:1,		/* object was transposed with another */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		volatile_empty:1,
		volatile_fault:1,
		all_reusable:1,
		blocked_access:1,
		set_cache_attr:1,
		__object2_unused_bits:15;	/* for expansion */

	uint32_t		scan_collisions;

#if	UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif /* UPL_DEBUG */

#ifdef	VM_PIP_DEBUG
/*
 * Keep track of the stack traces for the first holders
 * of a "paging_in_progress" reference for this VM object.
 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif	/* VM_PIP_DEBUG  */

	queue_chain_t		objq;      /* object queue - currently used for purgable queues */
};
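
/*
 * Illustrative sketch only (not part of the interface): the vo_* macros
 * defined above let callers name the union members directly.  For example,
 * reading the size of an internal object while holding its lock:
 *
 *	vm_object_size_t size;
 *
 *	vm_object_lock(object);
 *	if (object->internal)
 *		size = object->vo_size;     (expands to object->vo_un1.vou_size)
 *	vm_object_unlock(object);
 */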

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))

#define VM_PAGE_REMOVE(page)						\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = __page->object;				\
	if (__page == __object->memq_hint) {				\
		vm_page_t	__new_hint;				\
		queue_entry_t	__qe;					\
		__qe = queue_next(&__page->listq);			\
		if (queue_end(&__object->memq, __qe)) {			\
			__qe = queue_prev(&__page->listq);		\
			if (queue_end(&__object->memq, __qe)) {		\
				__qe = NULL;				\
			}						\
		}							\
		__new_hint = (vm_page_t) __qe;				\
		__object->memq_hint = __new_hint;			\
	}								\
	queue_remove(&__object->memq, __page, vm_page_t, listq);	\
	MACRO_END

#define VM_PAGE_INSERT(page, object)				\
	MACRO_BEGIN						\
	vm_page_t __page = (page);				\
	vm_object_t __object = (object);			\
	queue_enter(&__object->memq, __page, vm_page_t, listq); \
	__object->memq_hint = __page;				\
	MACRO_END
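
/*
 * Illustrative sketch only; the locking convention is an assumption based on
 * the memq/memq_hint manipulation above.  A typical caller holds the object
 * lock exclusively while linking or unlinking a page:
 *
 *	vm_object_lock(object);
 *	VM_PAGE_INSERT(mem, object);    (object->memq_hint now points at mem)
 *	...
 *	VM_PAGE_REMOVE(mem);            (hint is repaired before the unlink)
 *	vm_object_unlock(object);
 */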

__private_extern__
vm_object_t	kernel_object;		/* the single kernel object */

__private_extern__
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					   at a time for each object */

# define	VM_MSYNC_INITIALIZED			0
# define	VM_MSYNC_SYNCHRONIZING			1
# define	VM_MSYNC_DONE				2

struct msync_req {
	queue_chain_t		msr_q;		/* object request queue */
	queue_chain_t		req_q;		/* vm_msync request queue */
	unsigned int		flag;
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;		/* back pointer */
	decl_lck_mtx_data(,	msync_req_lock)	/* Lock for this structure */
};

typedef struct msync_req	*msync_req_t;
#define MSYNC_REQ_NULL		((msync_req_t) 0)


extern lck_grp_t		vm_map_lck_grp;
extern lck_attr_t		vm_map_lck_attr;

/*
 * Macros to allocate and free msync_reqs
 */
#define msync_req_alloc(msr)						\
    MACRO_BEGIN							\
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
	lck_mtx_init(&(msr)->msync_req_lock, &vm_map_lck_grp, &vm_map_lck_attr);		\
	msr->flag = VM_MSYNC_INITIALIZED;				\
    MACRO_END

#define msync_req_free(msr)						\
	(kfree((msr), sizeof(struct msync_req)))

#define msr_lock(msr)   lck_mtx_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr) lck_mtx_unlock(&(msr)->msync_req_lock)
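
/*
 * Illustrative sketch only: a minimal msync_req lifecycle, assuming the
 * caller links the request onto object->msr_q itself.
 *
 *	msync_req_t msr;
 *
 *	msync_req_alloc(msr);           (kalloc + lock init, flag = VM_MSYNC_INITIALIZED)
 *	msr->object = object;
 *	msr_lock(msr);
 *	msr->flag = VM_MSYNC_SYNCHRONIZING;
 *	msr_unlock(msr);
 *	...
 *	msync_req_free(msr);            (kfree of the whole structure)
 */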

/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void) __attribute__((section("__TEXT, initcode")));

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);

__private_extern__ vm_object_t	vm_object_allocate(
					vm_object_size_t	size);

__private_extern__ void    _vm_object_allocate(vm_object_size_t size,
			    vm_object_t object);

#if	TASK_SWAPPER

__private_extern__ void	vm_object_res_reference(
				vm_object_t 		object);
__private_extern__ void	vm_object_res_deallocate(
				vm_object_t		object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define	VM_OBJ_RES_INCR(object)
#define	VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */

#define vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	vm_object_lock_assert_exclusive(object);	\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	assert((RLObject)->ref_count > 1);		\
	vm_object_res_reference(RLObject);		\
	MACRO_END


#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, &(RLObject)->ref_count);		\
	assert((RLObject)->ref_count > 0);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END


__private_extern__ void		vm_object_reference(
					vm_object_t	object);

#if	!MACH_ASSERT

#define	vm_object_reference(object)			\
MACRO_BEGIN						\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock_shared(RObject);		\
		vm_object_reference_shared(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
MACRO_END

#endif	/* MACH_ASSERT */
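
/*
 * Illustrative sketch only: vm_object_reference() takes the object lock
 * shared internally, so it pairs with vm_object_deallocate() with no lock
 * held by the caller, while the _locked/_shared variants assume the caller
 * already holds the object lock in the corresponding mode:
 *
 *	vm_object_reference(object);            (no lock held)
 *	...
 *	vm_object_deallocate(object);
 *
 *	vm_object_lock(object);                 (exclusive lock held)
 *	vm_object_reference_locked(object);
 *	vm_object_unlock(object);
 */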

__private_extern__ void		vm_object_deallocate(
					vm_object_t	object);

__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t	object,
					int		flags);

__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);

__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);

__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t               kill_page,
					boolean_t		reusable_page);

__private_extern__ void	vm_object_reuse_pages(
	vm_object_t		object,
	vm_object_offset_t	start_offset,
	vm_object_offset_t	end_offset,
	boolean_t		allow_partial_reuse);

__private_extern__ void		vm_object_purge(
					vm_object_t		object);

__private_extern__ kern_return_t vm_object_purgable_control(
	vm_object_t	object,
	vm_purgable_t	control,
	int		*state);

__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);

__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);

__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		can_bypass);

__private_extern__ boolean_t	vm_object_copy_quickly(
				vm_object_t		*_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				boolean_t		*_src_needs_copy,
				boolean_t		*_dst_needs_copy);

__private_extern__ kern_return_t	vm_object_copy_strategically(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				vm_object_t		*dst_object,
				vm_object_offset_t	*dst_offset,
				boolean_t		*dst_needs_copy);

__private_extern__ kern_return_t	vm_object_copy_slowly(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				boolean_t		interruptible,
				vm_object_t		*_result_object);

__private_extern__ vm_object_t	vm_object_copy_delayed(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				boolean_t		src_object_shared);



__private_extern__ kern_return_t	vm_object_destroy(
					vm_object_t	object,
					kern_return_t	reason);

__private_extern__ void		vm_object_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_page_map(
				vm_object_t	object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				vm_object_offset_t	(*map_fn)
					(void *, vm_object_offset_t),
					void 		*map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
				vm_object_t		object,
				vm_object_offset_t	offset,
				upl_size_t		size,
				upl_t			*upl,
				upl_page_info_t		*page_info,
				unsigned int		*count,
				int			flags);

__private_extern__ kern_return_t vm_object_transpose(
				vm_object_t		object1,
				vm_object_t		object2,
				vm_object_size_t	transpose_size);

__private_extern__ boolean_t vm_object_sync(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				boolean_t		should_flush,
				boolean_t		should_return,
				boolean_t		should_iosync);

__private_extern__ kern_return_t vm_object_update(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				vm_object_offset_t	*error_offset,
				int			*io_errno,
				memory_object_return_t	should_return,
				int			flags,
				vm_prot_t		prot);

__private_extern__ kern_return_t vm_object_lock_request(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				memory_object_return_t	should_return,
				int			flags,
				vm_prot_t		prot);



__private_extern__ vm_object_t	vm_object_enter(
					memory_object_t		pager,
					vm_object_size_t	size,
					boolean_t		internal,
					boolean_t		init,
					boolean_t		check_named);


__private_extern__ void	vm_object_cluster_size(
					vm_object_t		object,
					vm_object_offset_t	*start,
					vm_size_t		*length,
					vm_object_fault_info_t  fault_info,
					uint32_t		*io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t		object,
	vm_object_offset_t	offset,
	ppnum_t			phys_page,
	vm_size_t		size);

__private_extern__ void vm_object_change_wimg_mode(
	vm_object_t		object,
	unsigned int		wimg_mode);

extern kern_return_t adjust_vm_object_cache(
	vm_size_t oval,
	vm_size_t nval);

extern kern_return_t vm_object_page_op(
	vm_object_t		object,
	vm_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags);

extern kern_return_t vm_object_range_op(
	vm_object_t		object,
	vm_object_offset_t	offset_beg,
	vm_object_offset_t	offset_end,
	int                     ops,
	uint32_t		*range);


__private_extern__ void		vm_object_reap_pages(
	                                vm_object_t object,
					int	reap_type);
#define REAP_REAP	0
#define	REAP_TERMINATE	1
#define REAP_PURGEABLE	2
#define REAP_DATA_FLUSH	3

#if CONFIG_FREEZE
struct default_freezer_handle;

__private_extern__ kern_return_t
vm_object_pack(
	unsigned int		*purgeable_count,
	unsigned int		*wired_count,
	unsigned int		*clean_count,
	unsigned int		*dirty_count,
	unsigned int		dirty_budget,
	boolean_t		*shared,
	vm_object_t		src_object,
	struct default_freezer_handle *df_handle);

__private_extern__ void
vm_object_pack_pages(
	unsigned int		*wired_count,
	unsigned int		*clean_count,
	unsigned int		*dirty_count,
	unsigned int		dirty_budget,
	vm_object_t		src_object,
	struct default_freezer_handle *df_handle);

__private_extern__ void
vm_object_pageout(
	vm_object_t     object);

__private_extern__  kern_return_t
vm_object_pagein(
	vm_object_t     object);
#endif /* CONFIG_FREEZE */

/*
 *	Event waiting handling
 */

#define	VM_OBJECT_EVENT_INITIALIZED		0
#define	VM_OBJECT_EVENT_PAGER_READY		1
#define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define	VM_OBJECT_EVENT_UNCACHING		5
#define	VM_OBJECT_EVENT_COPY_CALL		6
#define	VM_OBJECT_EVENT_CACHING			7
#define VM_OBJECT_EVENT_UNBLOCKED		8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9

#define	vm_object_assert_wait(object, event, interruptible)		\
	(((object)->all_wanted |= 1 << (event)),			\
	 assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))

#define	vm_object_wait(object, event, interruptible)			\
	(vm_object_assert_wait((object),(event),(interruptible)),	\
	vm_object_unlock(object),					\
	thread_block(THREAD_CONTINUE_NULL))				\

#define thread_sleep_vm_object(object, event, interruptible)		\
	lck_rw_sleep(&(object)->Lock, LCK_SLEEP_DEFAULT, (event_t)(event), (interruptible))

#define vm_object_sleep(object, event, interruptible)			\
	(((object)->all_wanted |= 1 << (event)),			\
	 thread_sleep_vm_object((object), 				\
		((vm_offset_t)(object)+(event)), (interruptible)))

#define	vm_object_wakeup(object, event)					\
	MACRO_BEGIN							\
	if ((object)->all_wanted & (1 << (event)))			\
		thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
	(object)->all_wanted &= ~(1 << (event));			\
	MACRO_END

#define	vm_object_set_wanted(object, event)				\
	MACRO_BEGIN							\
	((object)->all_wanted |= (1 << (event)));			\
	MACRO_END

#define	vm_object_wanted(object, event)					\
	((object)->all_wanted & (1 << (event)))
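
/*
 * Illustrative sketch only: a typical sleep/wakeup pairing on one of the
 * VM_OBJECT_EVENT_* values above.  The waiter is assumed to hold the object
 * lock; vm_object_sleep drops and reacquires it via lck_rw_sleep.
 *
 *	waiter:
 *		vm_object_lock(object);
 *		while (!object->pager_ready)
 *			vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *					THREAD_UNINT);
 *		vm_object_unlock(object);
 *
 *	waker:
 *		vm_object_lock(object);
 *		object->pager_ready = TRUE;
 *		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 *		vm_object_unlock(object);
 */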

/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
		   (object)->activity_in_progress);			\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else	/* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif	/* VM_PIP_DEBUG */

#define		vm_object_activity_begin(object)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress >= 0);			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	MACRO_END

#define		vm_object_activity_end(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->activity_in_progress > 0);			\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	MACRO_END

#define		vm_object_paging_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress >= 0);			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	MACRO_END

#define		vm_object_paging_end(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress > 0);			\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}								\
	MACRO_END

#define		vm_object_paging_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	       (object)->activity_in_progress != 0) {			\
		wait_result_t  _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_IN_PROGRESS,	\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	MACRO_END
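
/*
 * Illustrative sketch only: a pager interaction is bracketed by
 * vm_object_paging_begin/end so that vm_object_paging_wait (used before
 * collapsing, destroying or terminating the object) can wait for it:
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	... talk to the memory object ...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);   (wakes PAGING_ONLY/PAGING_IN_PROGRESS waiters)
 *	vm_object_unlock(object);
 */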

#define vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t  _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	MACRO_END


#define vm_object_mapping_begin(object) 				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(! (object)->mapping_in_progress);			\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
				      (interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END



#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1

extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;

extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern boolean_t	_vm_object_lock_try(vm_object_t);
extern boolean_t	vm_object_lock_avoid(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     &vm_object_lck_attr))
#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_unlock(object)	lck_rw_done(&(object)->Lock)
#define vm_object_lock_upgrade(object)	lck_rw_lock_shared_to_exclusive(&(object)->Lock)
#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object)	\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#endif /* MACH_ASSERT || DEBUG */

#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
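
/*
 * Illustrative example, assuming a 4 KB page size (PAGE_MASK == 0xFFF):
 *
 *	vm_object_trunc_page(0x1234) == 0x1000
 *	vm_object_round_page(0x1234) == 0x2000
 *	vm_object_round_page(0x1000) == 0x1000
 */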

extern void	vm_object_cache_add(vm_object_t);
extern void	vm_object_cache_remove(vm_object_t);
extern int	vm_object_cache_evict(int, int);

#endif	/* _VM_VM_OBJECT_H_ */
