1/*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49 *  School of Computer Science
50 *  Carnegie Mellon University
51 *  Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 *	File:	vm/vm_pageout.h
60 *	Author:	Avadis Tevanian, Jr.
61 *	Date:	1986
62 *
63 *	Declarations for the pageout daemon interface.
64 */
65
66#ifndef	_VM_VM_PAGEOUT_H_
67#define _VM_VM_PAGEOUT_H_
68
69#ifdef	KERNEL_PRIVATE
70
71#include <mach/mach_types.h>
72#include <mach/boolean.h>
73#include <mach/machine/vm_types.h>
74#include <mach/memory_object_types.h>
75
76#include <kern/kern_types.h>
77#include <kern/lock.h>
78
79#include <libkern/OSAtomic.h>
80
81
82#include <vm/vm_options.h>
83
84#ifdef	MACH_KERNEL_PRIVATE
85#include <vm/vm_page.h>
86#endif
87
88#include <sys/kdebug.h>
89
#define VM_PAGE_CLEANED_TARGET	30000		/* NOTE(review): previous comment said "25600 pages = 100 MB", which does not match 30000 — confirm intended figure */
#define VM_PAGE_CLEANED_MIN	((VM_PAGE_CLEANED_TARGET * 80) / 100)	/* low watermark: 80% of target */

/* count of pre-cleaned pages currently available (wraps vm_page_cleaned_count) */
#define VM_PAGE_AVAILABLE_COUNT()		((unsigned int)(vm_page_cleaned_count))
94
/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_reactivated, vm_pageout_cleaned_fault_reactivated, vm_pageout_cleaned_commit_reactivated;

#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_enabled;
/* With the freezer enabled, dynamic paging is off even when "port" is a valid pager port. */
#define VM_DYNAMIC_PAGING_ENABLED(port) ((memorystatus_freeze_enabled == FALSE) && IP_VALID(port))
#else
/* Dynamic paging is enabled whenever "port" is a valid pager port. */
#define VM_DYNAMIC_PAGING_ENABLED(port) IP_VALID(port)
#endif
104
105
/* non-zero enables the VM_DEBUG_EVENT() tracing macro below */
extern int	vm_debug_events;

/* kdebug event sub-codes, passed as "event" to VM_DEBUG_EVENT() */
#define VMF_CHECK_ZFDELAY	0x100
#define VMF_COWDELAY		0x101
#define VMF_ZFDELAY		0x102

#define VM_PAGEOUT_SCAN		0x104
#define VM_PAGEOUT_BALANCE	0x105
#define VM_PAGEOUT_FREELIST	0x106
#define VM_PAGEOUT_PURGEONE	0x107
#define VM_PAGEOUT_CACHE_EVICT	0x108
#define VM_PAGEOUT_THREAD_BLOCK	0x109

#define VM_UPL_PAGE_WAIT	0x120
#define VM_IOPL_PAGE_WAIT	0x121

#define VM_PRESSURE_EVENT	0x130

/*
 * Emit a kdebug trace point in the DBG_MACH_VM class, but only when
 * vm_debug_events is enabled.  The "name" argument is not used by the
 * expansion; it exists solely to label the event at the call site.
 */
#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)	\
	MACRO_BEGIN						\
	if (vm_debug_events) {					\
		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}							\
	MACRO_END
130
131
132
/*
 * Create a universal page list (UPL) describing a range of "map"
 * starting at "offset".  "page_list"/"count" receive per-page info;
 * "flags" carries UPL_* control flags.
 */
extern kern_return_t vm_map_create_upl(
	vm_map_t		map,
	vm_map_address_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	int			*flags);

/* highest physical page number of any page described by the UPL */
extern ppnum_t upl_get_highest_page(
	upl_t			upl);

/* size, in bytes, of the address range the UPL covers */
extern upl_size_t upl_get_size(
	upl_t			upl);
147
148
#ifndef	MACH_KERNEL_PRIVATE
/* Outside the Mach kernel proper, vm_page_t is an opaque pointer type. */
typedef struct vm_page	*vm_page_t;
#endif

/* Free a list of pages (chained via the page structure). */
extern void                vm_page_free_list(
                            vm_page_t	mem,
                            boolean_t	prepare_object);

/* Allocate "page_count" pages, returned as a list in *list. */
extern kern_return_t      vm_page_alloc_list(
                            int         page_count,
                            int			flags,
                            vm_page_t * list);

/* Accessors for opaque vm_page_t users (offset, physical page, list link). */
extern void               vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t            vm_page_get_phys_page(vm_page_t page);
extern vm_page_t          vm_page_get_next(vm_page_t page);
166
167#ifdef	MACH_KERNEL_PRIVATE
168
169#include <vm/vm_page.h>
170
extern unsigned int	vm_pageout_scan_event_counter;
extern unsigned int	vm_page_anonymous_count;


/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
        queue_head_t	pgo_pending;	/* laundry pages to be processed by pager's iothread */
        unsigned int	pgo_laundry;	/* current count of laundry pages on queue or in flight */
        unsigned int	pgo_maxlaundry;	/* laundry limit; reaching it throttles the queue (see VM_PAGE_Q_THROTTLED) */
	uint64_t	pgo_tid;	/* thread ID of I/O thread that services this queue */
	uint8_t		pgo_lowpriority; /* iothread is set to use low priority I/O */

        unsigned int	pgo_idle:1,	/* iothread is blocked waiting for work to do */
	                pgo_busy:1,     /* iothread is currently processing request from pgo_pending */
			pgo_throttled:1,/* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
		        pgo_draining:1,	/* NOTE(review): appears to flag a drain-in-progress phase — confirm against vm_pageout.c */
			pgo_inited:1,	/* queue has been initialized */
			:0;		/* pad out the remaining bits */
};

/* true when the queue has reached its laundry limit */
#define VM_PAGE_Q_THROTTLED(q)		\
        ((q)->pgo_laundry >= (q)->pgo_maxlaundry)

/* the two pageout queues (internal vs. external pagers) */
extern struct	vm_pageout_queue	vm_pageout_queue_internal;
extern struct	vm_pageout_queue	vm_pageout_queue_external;
199
200
201/*
202 *	Routines exported to Mach.
203 */
204extern void		vm_pageout(void);
205
206extern kern_return_t	vm_pageout_internal_start(void);
207
208extern void		vm_pageout_object_terminate(
209					vm_object_t	object);
210
211extern void		vm_pageout_cluster(
212	                                vm_page_t	m,
213					boolean_t	pageout);
214
215extern void		vm_pageout_initialize_page(
216					vm_page_t	m);
217
218extern void		vm_pageclean_setup(
219					vm_page_t		m,
220					vm_page_t		new_m,
221					vm_object_t		new_object,
222					vm_object_offset_t	new_offset);
223
224/* UPL exported routines and structures */
225
226#define upl_lock_init(object)	lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
227#define upl_lock_destroy(object)	lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
228#define upl_lock(object)	lck_mtx_lock(&(object)->Lock)
229#define upl_unlock(object)	lck_mtx_unlock(&(object)->Lock)
230
231#define MAX_VECTOR_UPL_ELEMENTS	8
232
/* per-sub-UPL I/O progress (offset and size) within a vector UPL */
struct _vector_upl_iostates{
	upl_offset_t offset;
	upl_size_t   size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

/*
 * A "vector" UPL aggregates up to MAX_VECTOR_UPL_ELEMENTS sub-UPLs so
 * they can be mapped and managed as one unit (see vector_upl_* below).
 */
struct _vector_upl {
	upl_size_t		size;
	uint32_t		num_upls;	/* number of valid entries in upl_elems */
	uint32_t		invalid_upls;
	uint32_t		_reserved;
	vm_map_t		submap;		/* map/address used when the vector is mapped (see vector_upl_set_submap) */
	vm_offset_t		submap_dst_addr;
	vm_object_offset_t	offset;
	upl_t			upl_elems[MAX_VECTOR_UPL_ELEMENTS];	/* the sub-UPLs */
	upl_page_info_array_t	pagelist;
	vector_upl_iostates_t	upl_iostates[MAX_VECTOR_UPL_ELEMENTS];	/* per-sub-UPL I/O state */
};
254
/* universal page list structure */

#if UPL_DEBUG
#define	UPL_DEBUG_STACK_FRAMES	16
#define UPL_DEBUG_COMMIT_RECORDS 4

/* debug record of one UPL commit/abort range, including caller backtrace */
struct ucd {
	upl_offset_t	c_beg;		/* start offset of the range */
	upl_offset_t	c_end;		/* end offset of the range */
	int		c_aborted;	/* non-zero if the range was aborted rather than committed */
	void *		c_retaddr[UPL_DEBUG_STACK_FRAMES];	/* return-address backtrace of the caller */
};
#endif
268
269
/* A universal page list: describes a set of pages of a VM object. */
struct upl {
	decl_lck_mtx_data(,	Lock)	/* Synchronization */
	int		ref_count;	/* internal reference count */
	int		ext_ref_count;	/* references held by external subsystems */
	int		flags;		/* UPL_* flags defined below */
	vm_object_t	src_object; /* object derived from */
	vm_object_offset_t offset;
	upl_size_t	size;	    /* size in bytes of the address space */
	vm_offset_t	kaddr;      /* secondary mapping in kernel */
	vm_object_t	map_object;
	ppnum_t		highest_page;	/* highest physical page number in this UPL */
	void*		vector_upl;	/* owning vector UPL, if this is a sub-UPL */
#if	UPL_DEBUG
	uintptr_t	ubc_alias1;
	uintptr_t 	ubc_alias2;
	queue_chain_t	uplq;	    /* List of outstanding upls on an obj */

	thread_t	upl_creator;	/* thread that created this UPL */
	uint32_t	upl_state;
	uint32_t	upl_commit_index;	/* next slot in upl_commit_records */
	void	*upl_create_retaddr[UPL_DEBUG_STACK_FRAMES];	/* creation backtrace */

	struct  ucd	upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];	/* recent commit/abort ranges */
#endif	/* UPL_DEBUG */
};
295
/* upl struct flags (bit values stored in upl.flags) */
#define UPL_PAGE_LIST_MAPPED	0x1
#define UPL_KERNEL_MAPPED 	0x2
#define	UPL_CLEAR_DIRTY		0x4
#define UPL_COMPOSITE_LIST	0x8
#define UPL_INTERNAL		0x10
#define UPL_PAGE_SYNC_DONE	0x20
#define UPL_DEVICE_MEMORY	0x40
#define UPL_PAGEOUT		0x80
#define UPL_LITE		0x100
#define UPL_IO_WIRE		0x200
#define UPL_ACCESS_BLOCKED	0x400
#define UPL_ENCRYPTED		0x800
#define UPL_SHADOWED		0x1000
#define UPL_KERNEL_OBJECT	0x2000
#define UPL_VECTOR		0x4000	/* this UPL is a vector UPL (see struct _vector_upl) */
#define UPL_SET_DIRTY		0x8000
#define UPL_HAS_BUSY		0x10000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL	0
#define UPL_CREATE_INTERNAL	0x1
#define UPL_CREATE_LITE		0x2
319
/* Vector UPL management: create/destroy, attach sub-UPLs, and track per-sub-UPL I/O state. */
extern upl_t vector_upl_create(vm_offset_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*);
extern upl_t vector_upl_subupl_byindex(upl_t , uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t , upl_offset_t*, upl_size_t*);
332
/* Apply pmap cache attributes to the pages listed in "user_page_list". */
extern void vm_object_set_pmap_cache_attr(
		vm_object_t		object,
		upl_page_info_array_t	user_page_list,
		unsigned int		num_pages,
		boolean_t		batch_pmap_op);

/* Build an I/O UPL (wired pages) for a range of "object". */
extern kern_return_t vm_object_iopl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags);

/* UPL request that may cluster up to "super_cluster" bytes around the range. */
extern kern_return_t vm_object_super_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags);
357
/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t		map,
	upl_t			upl,
	vm_map_offset_t		*dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t		map,
	upl_t			upl);

/* wired  page list structure */
typedef uint32_t *wpl_array_t;

extern void vm_page_free_reserve(int pages);

/* Adjust the laundry throttle state for "page" (see struct vm_pageout_queue). */
extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);
376
377/*
378 * ENCRYPTED SWAP:
379 */
380extern void upl_encrypt(
381	upl_t			upl,
382	upl_offset_t		crypt_offset,
383	upl_size_t		crypt_size);
384extern void vm_page_encrypt(
385	vm_page_t		page,
386	vm_map_offset_t		kernel_map_offset);
387extern boolean_t vm_pages_encrypted; /* are there encrypted pages ? */
388extern void vm_page_decrypt(
389	vm_page_t		page,
390	vm_map_offset_t		kernel_map_offset);
391extern kern_return_t vm_paging_map_object(
392	vm_map_offset_t		*address,
393	vm_page_t		page,
394	vm_object_t		object,
395	vm_object_offset_t	offset,
396	vm_map_size_t		*size,
397	vm_prot_t		protection,
398	boolean_t		can_unlock_object);
399extern void vm_paging_unmap_object(
400	vm_object_t		object,
401	vm_map_offset_t		start,
402	vm_map_offset_t		end);
403decl_simple_lock_data(extern, vm_paging_lock)
404
405/*
406 * Backing store throttle when BS is exhausted
407 */
408extern unsigned int    vm_backing_store_low;
409
410extern void vm_pageout_steal_laundry(
411	vm_page_t page,
412	boolean_t queues_locked);
413
414extern boolean_t vm_page_is_slideable(vm_page_t m);
415
416extern kern_return_t vm_page_slide(vm_page_t page, vm_map_offset_t kernel_mapping_offset);
417#endif  /* MACH_KERNEL_PRIVATE */
418
#if UPL_DEBUG
/* Debug-only: tag a UPL with two caller-chosen alias values (see struct upl). */
extern kern_return_t  upl_ubc_alias_set(
	upl_t upl,
	uintptr_t alias1,
	uintptr_t alias2);
extern int  upl_ubc_alias_get(
	upl_t upl,
	uintptr_t * al,
	uintptr_t * al2);
#endif /* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
			boolean_t	suspend);

/* Swap the page contents/state of two UPLs. */
extern kern_return_t upl_transpose(
	upl_t	upl1,
	upl_t	upl2);

/* Report (and optionally wait for) memory-pressure statistics. */
extern kern_return_t mach_vm_pressure_monitor(
	boolean_t	wait_for_pressure,
	unsigned int	nsecs_monitored,
	unsigned int	*pages_reclaimed_p,
	unsigned int	*pages_wanted_p);

/* Register a callout invoked to clean up buffers; "func" receives a page count. */
extern kern_return_t
vm_set_buffer_cleanup_callout(
	boolean_t	(*func)(int));
448
/* Statistics for the reusable-pages mechanism (vm_stats / madvise-style reuse). */
struct vm_page_stats_reusable {
	SInt32		reusable_count;		/* current count of reusable pages (atomically updated) */
	uint64_t	reusable;		/* pages marked reusable */
	uint64_t	reused;			/* pages reused */
	uint64_t	reused_wire;		/* pages reused by wiring */
	uint64_t	reused_remove;		/* pages reused by removal */
	uint64_t	all_reusable_calls;	/* "mark all reusable" operations */
	uint64_t	partial_reusable_calls;	/* partial-range reusable operations */
	uint64_t	all_reuse_calls;	/* "reuse all" operations */
	uint64_t	partial_reuse_calls;	/* partial-range reuse operations */
	uint64_t	reusable_pages_success;
	uint64_t	reusable_pages_failure;
	uint64_t	reusable_pages_shared;
	uint64_t	reuse_pages_success;
	uint64_t	reuse_pages_failure;
	uint64_t	can_reuse_success;
	uint64_t	can_reuse_failure;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

/* Flush dirty memory before hibernation; returns 0 on success. */
extern int hibernate_flush_memory(void);
470
471#endif	/* KERNEL_PRIVATE */
472
473#endif	/* _VM_VM_PAGEOUT_H_ */
474