/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef	_VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern boolean_t vm_object_tracking_inited;
extern btlog_t *vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_BTDEPTH 7
#define VM_OBJECT_TRACKING_OP_CREATED	1
#define VM_OBJECT_TRACKING_OP_MODIFIED	2
#define VM_OBJECT_TRACKING_OP_TRUESHARE	3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;
struct vm_shared_region_slide_info;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int		interruptible;
	uint32_t	user_tag;
	vm_size_t	cluster_size;
	vm_behavior_t	behavior;
	vm_map_offset_t	lo_offset;
	vm_map_offset_t	hi_offset;
	unsigned int
	/* boolean_t */	no_cache:1,
	/* boolean_t */	stealth:1,
	/* boolean_t */	io_sync:1,
	/* boolean_t */ cs_bypass:1,
	/* boolean_t */	mark_zf_absent:1,
	/* boolean_t */ batch_pmap_op:1,
		__vm_object_fault_info_unused_bits:26;
	int		pmap_options;
};
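
/*
 * Illustrative sketch (hypothetical caller, not part of this interface):
 * a fault path would typically zero a vm_object_fault_info, fill in the
 * fields it knows about, and hand it to vm_object_cluster_size() to size
 * a pagein cluster.  The values below are made up for the example.
 *
 *	struct vm_object_fault_info fault_info;
 *
 *	bzero(&fault_info, sizeof (fault_info));
 *	fault_info.interruptible = THREAD_UNINT;
 *	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
 *	fault_info.lo_offset = entry_start;
 *	fault_info.hi_offset = entry_end;
 *	vm_object_cluster_size(object, &start, &length,
 *			       &fault_info, &io_streaming);
 */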


#define	vo_size				vo_un1.vou_size
#define vo_cache_pages_to_scan		vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset		vo_un2.vou_shadow_offset
#define vo_cache_ts			vo_un2.vou_cache_ts
#define vo_purgeable_owner		vo_un2.vou_purgeable_owner
#define vo_slide_info			vo_un2.vou_slide_info

struct vm_object {
	queue_head_t		memq;		/* Resident memory */
	lck_rw_t		Lock;		/* Synchronization */

	union {
		vm_object_size_t  vou_size;	/* Object size (only valid if internal) */
		int		  vou_cache_pages_to_scan;	/* pages yet to be visited in an
								 * external object in cache
								 */
	} vo_un1;

	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
#if	TASK_SWAPPER
	int			res_count;	/* Residency references (swap)*/
#endif	/* TASK_SWAPPER */
	unsigned int		resident_page_count;
						/* number of resident pages */
	unsigned int		wired_page_count; /* number of wired pages */
	unsigned int		reusable_page_count;

	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */

	union {
		vm_object_offset_t vou_shadow_offset;	/* Offset into shadow */
		clock_sec_t	vou_cache_ts;	/* age of an external object
						 * present in cache
						 */
		task_t		vou_purgeable_owner;	/* If the purgeable bits below are set
							 * to volatile/empty, this is the task
							 * that owns this purgeable object.
							 */
		struct vm_shared_region_slide_info *vou_slide_info;
	} vo_un2;

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */

#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int		paging_in_progress:16,
				__object1_unused_bits:16;
	unsigned int		activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short		paging_in_progress;
	unsigned short		activity_in_progress;
#endif /* __LP64__ */
						/* The memory object ports are
						 * being used (e.g., for pagein
						 * or pageout) -- don't change
						 * any of these fields (i.e.,
						 * don't collapse, destroy or
						 * terminate)
						 */

	unsigned int
	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below */
	/* boolean_t */	pager_created:1,	/* Has pager been created? */
	/* boolean_t */	pager_initialized:1,	/* Are fields ready to use? */
	/* boolean_t */	pager_ready:1,		/* Will pager take requests? */

	/* boolean_t */		pager_trusted:1,/* The pager for this object
						 * is trusted. This is true for
						 * all internal objects (backed
						 * by the default pager)
						 */
	/* boolean_t */		can_persist:1,	/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated?
						 */
	/* boolean_t */		internal:1,	/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager)
						 */
	/* boolean_t */		temporary:1,	/* Permanent objects may be
						 * changed externally by the
						 * memory manager, and changes
						 * made in memory must be
						 * reflected back to the memory
						 * manager.  Temporary objects
						 * lack both of these
						 * characteristics.
						 */
	/* boolean_t */		private:1,	/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */		pageout:1,	/* pageout object. contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */		alive:1,	/* Not yet terminated */

	/* boolean_t */		purgable:2,	/* Purgable state.  See
						 * VM_PURGABLE_*
						 */
	/* boolean_t */		purgeable_when_ripe:1, /* Purgeable when a token
							* becomes ripe.
							*/
	/* boolean_t */		shadowed:1,	/* Shadow may exist */
	/* boolean_t */		advisory_pageout:1,
						/* Instead of sending page
						 * via OOL, just notify
						 * pager that the kernel
						 * wants to discard it, page
						 * remains in object */
	/* boolean_t */		true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */		terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */		named:1,	/* A "named" object enforces
						 * an internal naming
						 * convention: by calling the
						 * right routines for
						 * allocation and destruction,
						 * UBC references against the
						 * vm_object are checked.
						 */
	/* boolean_t */		shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero filled page.
						 */
	/* boolean_t */		phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t. pmap
						 * access bits.
						 */
	/* boolean_t */		nophyscache:1;
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching. (for
						 * I/O)
						 */



	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */

	queue_head_t		msr_q;		/* memory object synchronise
						   request queue */

	/*
	 * The following fields are not protected by any locks;
	 * they are updated via atomic compare-and-swap.
	 */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;
	uint32_t		pages_used;
#if	MACH_PAGEMAP
	vm_external_map_t	existence_map;	/* bitmap of pages written to
						 * backing storage */
#endif	/* MACH_PAGEMAP */
	vm_offset_t		cow_hint;	/* last page present in     */
						/* shadow but not in object */
#if	MACH_ASSERT
	struct vm_object	*paging_object;	/* object whose pages being
						 * swapped out are temporarily
						 * put in the current object
						 */
#endif
	/* hold object lock when altering */
	unsigned	int
		wimg_bits:8,	        /* cache WIMG bits         */
		code_signed:1,		/* pages are signed and should be
					   validated; the signatures are stored
					   with the pager */
		hashed:1,		/* object/pager entered in hash */
		transposed:1,		/* object was transposed with another */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		phantom_isssd:1,
		volatile_empty:1,
		volatile_fault:1,
		all_reusable:1,
		blocked_access:1,
		set_cache_attr:1,
		object_slid:1,
		purgeable_queue_type:2,
		purgeable_queue_group:3,
		io_tracking:1,
		__object2_unused_bits:7;	/* for expansion */

	uint32_t		scan_collisions;
#if CONFIG_PHANTOM_CACHE
	uint32_t		phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif

#ifdef	VM_PIP_DEBUG
/*
 * Keep track of the stack traces for the first holders
 * of a "paging_in_progress" reference for this VM object.
 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif	/* VM_PIP_DEBUG  */

	queue_chain_t		objq;	/* object queue - currently used for purgable queues */

#if DEBUG
	void *purgeable_owner_bt[16];
	task_t vo_purgeable_volatilizer; /* who made it volatile? */
	void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))

#define VM_PAGE_REMOVE(page)						\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = __page->object;				\
	if (__page == __object->memq_hint) {				\
		vm_page_t	__new_hint;				\
		queue_entry_t	__qe;					\
		__qe = queue_next(&__page->listq);			\
		if (queue_end(&__object->memq, __qe)) {			\
			__qe = queue_prev(&__page->listq);		\
			if (queue_end(&__object->memq, __qe)) {		\
				__qe = NULL;				\
			}						\
		}							\
		__new_hint = (vm_page_t) __qe;				\
		__object->memq_hint = __new_hint;			\
	}								\
	queue_remove(&__object->memq, __page, vm_page_t, listq);	\
	MACRO_END

#define VM_PAGE_INSERT(page, object)				\
	MACRO_BEGIN						\
	vm_page_t __page = (page);				\
	vm_object_t __object = (object);			\
	queue_enter(&__object->memq, __page, vm_page_t, listq); \
	__object->memq_hint = __page;				\
	MACRO_END
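
/*
 * Illustrative sketch (hypothetical caller, not part of this interface):
 * both macros expect the object lock to be held exclusively; memq_hint
 * is kept pointing at a page that is still on the object's memq so that
 * lookups can start near recently touched pages.
 *
 *	vm_object_lock(object);
 *	VM_PAGE_INSERT(mem, object);
 *	...
 *	VM_PAGE_REMOVE(mem);
 *	vm_object_unlock(object);
 */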

extern
vm_object_t	kernel_object;		/* the single kernel object */

extern
vm_object_t	compressor_object;	/* the single compressor object */

extern
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					   at a time for each object */

# define	VM_MSYNC_INITIALIZED			0
# define	VM_MSYNC_SYNCHRONIZING			1
# define	VM_MSYNC_DONE				2

struct msync_req {
	queue_chain_t		msr_q;		/* object request queue */
	queue_chain_t		req_q;		/* vm_msync request queue */
	unsigned int		flag;
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;		/* back pointer */
	decl_lck_mtx_data(,	msync_req_lock)	/* Lock for this structure */
};

typedef struct msync_req	*msync_req_t;
#define MSYNC_REQ_NULL		((msync_req_t) 0)


extern lck_grp_t		vm_map_lck_grp;
extern lck_attr_t		vm_map_lck_attr;

/*
 * Macros to allocate and free msync_reqs
 */
#define msync_req_alloc(msr)						\
	MACRO_BEGIN							\
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
	lck_mtx_init(&(msr)->msync_req_lock, &vm_map_lck_grp, &vm_map_lck_attr); \
	(msr)->flag = VM_MSYNC_INITIALIZED;				\
	MACRO_END

#define msync_req_free(msr)						\
	MACRO_BEGIN							\
	lck_mtx_destroy(&(msr)->msync_req_lock, &vm_map_lck_grp);	\
	kfree((msr), sizeof(struct msync_req));				\
	MACRO_END

#define msr_lock(msr)   lck_mtx_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr) lck_mtx_unlock(&(msr)->msync_req_lock)
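
/*
 * Illustrative lifecycle sketch (hypothetical caller, not part of this
 * interface): a vm_msync request is allocated, filled in, queued on the
 * object's msr_q, and freed once it reaches VM_MSYNC_DONE.
 *
 *	msync_req_t msr;
 *
 *	msync_req_alloc(msr);
 *	msr->offset = offset;
 *	msr->length = size;
 *	msr->object = object;
 *	msr_lock(msr);
 *	msr->flag = VM_MSYNC_SYNCHRONIZING;
 *	msr_unlock(msr);
 *	...
 *	msync_req_free(msr);
 */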

/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void);

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);

__private_extern__ vm_object_t	vm_object_allocate(
					vm_object_size_t	size);

__private_extern__ void    _vm_object_allocate(vm_object_size_t size,
			    vm_object_t object);

#if	TASK_SWAPPER

__private_extern__ void	vm_object_res_reference(
				vm_object_t 		object);
__private_extern__ void	vm_object_res_deallocate(
				vm_object_t		object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define	VM_OBJ_RES_INCR(object)
#define	VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */

#define vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	vm_object_lock_assert_exclusive(object);	\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	assert((RLObject)->ref_count > 1);		\
	vm_object_res_reference(RLObject);		\
	MACRO_END


#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, &(RLObject)->ref_count);				\
	assert((RLObject)->ref_count > 0);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END


__private_extern__ void		vm_object_reference(
					vm_object_t	object);

#if	!MACH_ASSERT

#define	vm_object_reference(object)			\
MACRO_BEGIN						\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock_shared(RObject);		\
		vm_object_reference_shared(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
MACRO_END

#endif	/* MACH_ASSERT */
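
/*
 * Illustrative sketch (hypothetical caller, not part of this interface):
 * vm_object_reference() takes the object lock itself; callers that
 * already hold the lock use the _locked or _shared variants.  Either way,
 * the reference is later dropped with vm_object_deallocate().
 *
 *	vm_object_reference(object);
 *	... use the object ...
 *	vm_object_deallocate(object);
 */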

__private_extern__ void		vm_object_deallocate(
					vm_object_t	object);

__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t	object,
					int		flags);

__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);

__private_extern__ void		vm_object_pmap_protect_options(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot,
					int			options);

__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);

__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page,
					boolean_t		reusable_page);

__private_extern__ void	vm_object_reuse_pages(
	vm_object_t		object,
	vm_object_offset_t	start_offset,
	vm_object_offset_t	end_offset,
	boolean_t		allow_partial_reuse);

__private_extern__ void		vm_object_purge(
					vm_object_t		object,
					int			flags);

__private_extern__ kern_return_t vm_object_purgable_control(
	vm_object_t	object,
	vm_purgable_t	control,
	int		*state);

__private_extern__ kern_return_t vm_object_get_page_counts(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	unsigned int		*resident_page_count,
	unsigned int		*dirty_page_count);

__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);

__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);

__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		can_bypass);

__private_extern__ boolean_t	vm_object_copy_quickly(
				vm_object_t		*_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				boolean_t		*_src_needs_copy,
				boolean_t		*_dst_needs_copy);

__private_extern__ kern_return_t	vm_object_copy_strategically(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				vm_object_t		*dst_object,
				vm_object_offset_t	*dst_offset,
				boolean_t		*dst_needs_copy);

__private_extern__ kern_return_t	vm_object_copy_slowly(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				boolean_t		interruptible,
				vm_object_t		*_result_object);

__private_extern__ vm_object_t	vm_object_copy_delayed(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				boolean_t		src_object_shared);


__private_extern__ kern_return_t	vm_object_destroy(
					vm_object_t	object,
					kern_return_t	reason);

__private_extern__ void		vm_object_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_compressor_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_page_map(
				vm_object_t	object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				vm_object_offset_t	(*map_fn)
					(void *, vm_object_offset_t),
					void 		*map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
				vm_object_t		object,
				vm_object_offset_t	offset,
				upl_size_t		size,
				upl_t			*upl,
				upl_page_info_t		*page_info,
				unsigned int		*count,
				int			flags);

__private_extern__ kern_return_t vm_object_transpose(
				vm_object_t		object1,
				vm_object_t		object2,
				vm_object_size_t	transpose_size);

__private_extern__ boolean_t vm_object_sync(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				boolean_t		should_flush,
				boolean_t		should_return,
				boolean_t		should_iosync);

__private_extern__ kern_return_t vm_object_update(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				vm_object_offset_t	*error_offset,
				int			*io_errno,
				memory_object_return_t	should_return,
				int			flags,
				vm_prot_t		prot);

__private_extern__ kern_return_t vm_object_lock_request(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				memory_object_return_t	should_return,
				int			flags,
				vm_prot_t		prot);



__private_extern__ vm_object_t	vm_object_enter(
					memory_object_t		pager,
					vm_object_size_t	size,
					boolean_t		internal,
					boolean_t		init,
					boolean_t		check_named);


__private_extern__ void	vm_object_cluster_size(
					vm_object_t		object,
					vm_object_offset_t	*start,
					vm_size_t		*length,
					vm_object_fault_info_t  fault_info,
					uint32_t		*io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t		object,
	vm_object_offset_t	offset,
	ppnum_t			phys_page,
	vm_size_t		size);

__private_extern__ void vm_object_change_wimg_mode(
	vm_object_t		object,
	unsigned int		wimg_mode);

extern kern_return_t adjust_vm_object_cache(
	vm_size_t oval,
	vm_size_t nval);

extern kern_return_t vm_object_page_op(
	vm_object_t		object,
	vm_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags);

extern kern_return_t vm_object_range_op(
	vm_object_t		object,
	vm_object_offset_t	offset_beg,
	vm_object_offset_t	offset_end,
	int			ops,
	uint32_t		*range);


__private_extern__ void		vm_object_reap_pages(
					vm_object_t	object,
					int		reap_type);
#define REAP_REAP	0
#define	REAP_TERMINATE	1
#define REAP_PURGEABLE	2
#define REAP_DATA_FLUSH	3

#if CONFIG_FREEZE
struct default_freezer_handle;

__private_extern__ kern_return_t
vm_object_pack(
	unsigned int		*purgeable_count,
	unsigned int		*wired_count,
	unsigned int		*clean_count,
	unsigned int		*dirty_count,
	unsigned int		dirty_budget,
	boolean_t		*shared,
	vm_object_t		src_object,
	struct default_freezer_handle *df_handle);

__private_extern__ void
vm_object_pack_pages(
	unsigned int		*wired_count,
	unsigned int		*clean_count,
	unsigned int		*dirty_count,
	unsigned int		dirty_budget,
	vm_object_t		src_object,
	struct default_freezer_handle *df_handle);

__private_extern__ void
vm_object_pageout(
	vm_object_t     object);

__private_extern__  kern_return_t
vm_object_pagein(
	vm_object_t     object);
#endif /* CONFIG_FREEZE */

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t 	blkno;
	uint32_t 	len;
	int 		priority;
	struct vnode 	*devvp;
	queue_chain_t 	io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 *	Event waiting handling
 */

#define	VM_OBJECT_EVENT_INITIALIZED		0
#define	VM_OBJECT_EVENT_PAGER_READY		1
#define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define	VM_OBJECT_EVENT_UNCACHING		5
#define	VM_OBJECT_EVENT_COPY_CALL		6
#define	VM_OBJECT_EVENT_CACHING			7
#define VM_OBJECT_EVENT_UNBLOCKED		8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9

#define	vm_object_assert_wait(object, event, interruptible)		\
	(((object)->all_wanted |= 1 << (event)),			\
	 assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))

#define	vm_object_wait(object, event, interruptible)			\
	(vm_object_assert_wait((object),(event),(interruptible)),	\
	vm_object_unlock(object),					\
	thread_block(THREAD_CONTINUE_NULL))

#define thread_sleep_vm_object(object, event, interruptible)		\
	lck_rw_sleep(&(object)->Lock, LCK_SLEEP_PROMOTED_PRI, (event_t)(event), (interruptible))

#define vm_object_sleep(object, event, interruptible)			\
	(((object)->all_wanted |= 1 << (event)),			\
	 thread_sleep_vm_object((object), 				\
		((vm_offset_t)(object)+(event)), (interruptible)))

#define	vm_object_wakeup(object, event)					\
	MACRO_BEGIN							\
	if ((object)->all_wanted & (1 << (event)))			\
		thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
	(object)->all_wanted &= ~(1 << (event));			\
	MACRO_END

#define	vm_object_set_wanted(object, event)				\
	MACRO_BEGIN							\
	((object)->all_wanted |= (1 << (event)));			\
	MACRO_END

#define	vm_object_wanted(object, event)					\
	((object)->all_wanted & (1 << (event)))
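
/*
 * Illustrative sketch (hypothetical waiter, not part of this interface):
 * a thread that needs the pager to become ready records its interest in
 * all_wanted and sleeps on the object, dropping the object lock while
 * blocked; the thread that makes the pager ready wakes any waiters.
 *
 *	while (!object->pager_ready) {
 *		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *				THREAD_UNINT);
 *	}
 *
 * and, on the other side, with the object locked:
 *
 *	object->pager_ready = TRUE;
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */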

/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
		   (object)->activity_in_progress);			\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else	/* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif	/* VM_PIP_DEBUG */

#define		vm_object_activity_begin(object)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define		vm_object_activity_end(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_end(%p): underflow\n", (object));\
	}								\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	MACRO_END

#define		vm_object_paging_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define		vm_object_paging_end(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_end(%p): underflow\n", (object));\
	}								\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}								\
	MACRO_END
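
/*
 * Illustrative sketch (hypothetical caller, not part of this interface):
 * paging_in_progress/activity_in_progress references pin the object
 * against collapse, destruction and termination for the duration of a
 * paging operation.  They are taken and dropped with the object locked
 * exclusively.
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	... issue the pagein/pageout ...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);
 *	vm_object_unlock(object);
 */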

#define		vm_object_paging_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	       (object)->activity_in_progress != 0) {			\
		wait_result_t  _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_IN_PROGRESS,	\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t  _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	MACRO_END


#define vm_object_mapping_begin(object) 				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(! (object)->mapping_in_progress);			\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
				      (interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END
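
/*
 * Illustrative sketch (hypothetical caller, not part of this interface):
 * mapping_in_progress serializes mapping and unmapping of the pager;
 * a second mapper waits until the first one finishes.
 *
 *	vm_object_lock(object);
 *	vm_object_mapping_wait(object, THREAD_UNINT);
 *	vm_object_mapping_begin(object);
 *	vm_object_unlock(object);
 *	... map or unmap the memory object pager ...
 *	vm_object_lock(object);
 *	vm_object_mapping_end(object);
 *	vm_object_unlock(object);
 */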


#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1

extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;
extern lck_attr_t	compressor_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;

extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern boolean_t	_vm_object_lock_try(vm_object_t);
extern boolean_t	vm_object_lock_avoid(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     (((object) == compressor_object) ?			\
		     &compressor_object_lck_attr :			\
		      &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_unlock(object)	lck_rw_done(&(object)->Lock)
#define vm_object_lock_upgrade(object)	lck_rw_lock_shared_to_exclusive(&(object)->Lock)
#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)
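
/*
 * Illustrative sketch (hypothetical caller, not part of this interface):
 * readers take the lock shared and upgrade when they need to modify the
 * object; if the upgrade fails, the shared hold has been dropped and the
 * lock must be retaken exclusively.
 *
 *	vm_object_lock_shared(object);
 *	...
 *	if (!vm_object_lock_upgrade(object)) {
 *		vm_object_lock(object);
 *	}
 *	... modify the object ...
 *	vm_object_unlock(object);
 */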

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object)	\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#endif /* MACH_ASSERT || DEBUG */

#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
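
/*
 * For example, with 4K pages (PAGE_MASK == 0xFFF):
 * vm_object_trunc_page(0x1234) == 0x1000 and
 * vm_object_round_page(0x1234) == 0x2000.
 */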

extern void	vm_object_cache_add(vm_object_t);
extern void	vm_object_cache_remove(vm_object_t);
extern int	vm_object_cache_evict(int, int);

#endif	/* _VM_VM_OBJECT_H_ */