1/*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49 *  School of Computer Science
50 *  Carnegie Mellon University
51 *  Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57/*
58 *	Default pager.
59 *		General definitions.
60 */
61
62#ifndef	_DEFAULT_PAGER_INTERNAL_H_
63#define _DEFAULT_PAGER_INTERNAL_H_
64
65#include <default_pager/diag.h>
66#include <default_pager/default_pager_types.h>
67#include <mach/mach_types.h>
68#include <ipc/ipc_port.h>
69#include <ipc/ipc_types.h>
70#include <ipc/ipc_space.h>
71#include <kern/locks.h>
72#include <kern/kalloc.h>
73#include <kern/thread.h>
74#include <vm/vm_kern.h>
75#include <device/device_types.h>
76
77/*
78 * Default option settings.
79 */
80#ifndef	PARALLEL
81#define	PARALLEL	1
82#endif
83
84#ifndef	CHECKSUM
85#define	CHECKSUM	0
86#endif
87
88#define MACH_PORT_FACE mach_port_t
89
90#if CONFIG_FREEZE
91#define	RECLAIM_SWAP	1
92#else
93#define	RECLAIM_SWAP	0
94#endif
95
96#define	USE_PRECIOUS	0
97
98#ifdef	USER_PAGER
99#define UP(stuff)	stuff
100#else	/* USER_PAGER */
101#define UP(stuff)
102#endif	/* USER_PAGER */
103
/*
 * dprintf((fmt, args...)): debug printf tagged with the pager's name.
 * Note the double parentheses at the call site -- "args" must be a
 * complete, parenthesized printf argument list, e.g.
 * dprintf(("x = %d\n", x)).
 */
#define dprintf(args)						\
	do {							\
		printf("%s[KERNEL]: ", my_name);		\
		printf args;					\
	} while (0)
109
110/*
111 * Debug.
112 */
113extern char	my_name[];
114
115#define DEFAULT_PAGER_DEBUG	0
116
117#if	DEFAULT_PAGER_DEBUG
118
119extern int	debug_mask;
120#define	DEBUG_MSG_EXTERNAL	0x00000001
121#define DEBUG_MSG_INTERNAL	0x00000002
122#define DEBUG_MO_EXTERNAL	0x00000100
123#define DEBUG_MO_INTERNAL	0x00000200
124#define DEBUG_VS_EXTERNAL	0x00010000
125#define DEBUG_VS_INTERNAL	0x00020000
126#define DEBUG_BS_EXTERNAL	0x01000000
127#define DEBUG_BS_INTERNAL	0x02000000
128
/*
 * DP_DEBUG(level, args): emit dprintf(args) only when one of the
 * DEBUG_* bits in "level" is enabled in the global debug_mask.
 */
#define DP_DEBUG(level, args)						\
	do {								\
		if (debug_mask & (level)) 				\
			dprintf(args); 					\
	} while (0)
134
/*
 * ASSERT(expr): panic with file/line/expression details when "expr"
 * is false.
 *
 * The MACH_KERNEL conditional must select between two complete
 * #define directives: preprocessor directives are not permitted
 * inside a macro replacement list (the previous form only escaped
 * diagnosis because DEFAULT_PAGER_DEBUG is 0, so this group was
 * skipped; enabling it broke the build).
 */
#ifndef MACH_KERNEL
#define ASSERT(expr)							\
	do {								\
		if (!(expr))						\
			panic("%s[%d]%s: assertion failed in %s line %d: %s",\
			      my_name, dp_thread_id(), here,		\
			      __FILE__, __LINE__, # expr);		\
	} while (0)
#else	/* MACH_KERNEL */
#define ASSERT(expr)							\
	do {								\
		if (!(expr))						\
			panic("%s[KERNEL]: assertion failed in %s line %d: %s",\
			      my_name, __FILE__, __LINE__, # expr);	\
	} while (0)
#endif	/* MACH_KERNEL */
147
148#else	/* DEFAULT_PAGER_DEBUG */
149
150#define DP_DEBUG(level, args) do {} while(0)
151#define ASSERT(clause) do {} while(0)
152
153#endif	/* DEFAULT_PAGER_DEBUG */
154
155#ifndef MACH_KERNEL
156extern char *mach_error_string(kern_return_t);
157#endif
158
159#define	PAGER_SUCCESS	0
160#define	PAGER_FULL	1
161#define	PAGER_ERROR	2
162
163/*
164 * VM and IPC globals.
165 */
166#ifdef MACH_KERNEL
167#define vm_page_size PAGE_SIZE
168#define vm_page_mask PAGE_MASK
169#define vm_page_shift PAGE_SHIFT
170#else
171extern vm_object_size_t	vm_page_size;
172extern unsigned long long	vm_page_mask;
173extern int		vm_page_shift;
174#endif
175
176#ifndef MACH_KERNEL
177#define	ptoa(p)	((p)*vm_page_size)
178#define	atop(a)	((a)/vm_page_size)
179#endif
180#define	howmany(a,b)	((((a) % (b)) == 0) ? ((a) / (b)) : (((a) / (b)) + 1))
181
182extern memory_object_default_t	default_pager_object;
183
184#ifdef MACH_KERNEL
185extern lck_mtx_t		dpt_lock;	/* Lock for the dpt array */
186extern int	default_pager_internal_count;
187extern MACH_PORT_FACE	default_pager_host_port;
188/* extern task_t		default_pager_self; */  /* dont need or want */
189extern MACH_PORT_FACE	default_pager_internal_set;
190extern MACH_PORT_FACE	default_pager_external_set;
191extern MACH_PORT_FACE	default_pager_default_set;
192#else
193extern mach_port_t	default_pager_host_port;
194extern task_port_t	default_pager_self;
195extern mach_port_t	default_pager_internal_set;
196extern mach_port_t	default_pager_external_set;
197extern mach_port_t	default_pager_default_set;
198#endif
199
200typedef vm32_offset_t dp_offset_t;
201typedef vm32_size_t dp_size_t;
202typedef vm32_address_t dp_address_t;
203
/*
 * Per-thread state for the default pager's service threads.  In the
 * in-kernel pager these live in dpt_array (guarded by dpt_lock).
 */
typedef struct default_pager_thread {
#ifndef MACH_KERNEL
	cthread_t	dpt_thread;	/* Server thread. */
#endif
	vm_offset_t	dpt_buffer;	/* Read buffer. */
	boolean_t	dpt_internal;	/* Do we handle internal objects? */
#ifndef MACH_KERNEL
	int		dpt_id;		/* thread id for printf */
#else
	int		checked_out;	/* NOTE(review): appears to mark the
					 * entry as claimed by a thread --
					 * confirm against dpt_array users. */
#endif
	boolean_t	dpt_initialized_p; /* Thread is ready for requests.  */
} default_pager_thread_t;
217
218#ifdef MACH_KERNEL
219extern default_pager_thread_t	**dpt_array;
220#endif
221
222/*
223 * Global statistics.
224 */
/*
 * System-wide paging counters; updated under gs_lock via the GSTAT()
 * macro below.
 */
struct global_stats {
	unsigned int	gs_pageout_calls;	/* # pageout calls */
	unsigned int	gs_pagein_calls;	/* # pagein calls */
	unsigned int	gs_pages_in;		/* # pages paged in (total) */
	unsigned int	gs_pages_out;		/* # pages paged out (total) */
	unsigned int	gs_pages_unavail;	/* # zero-fill pages */
	unsigned int	gs_pages_init;		/* # page init requests */
	unsigned int	gs_pages_init_writes;	/* # page init writes */
	VSTATS_LOCK_DECL(gs_lock)
};
235extern struct global_stats global_stats;
236#define GSTAT(clause)	VSTATS_ACTION(&global_stats.gs_lock, (clause))
237
238/*
239 * Cluster related definitions.
240 * Clusters are sized in number of pages per cluster.
241 * Cluster sizes must be powers of two.
242 *
243 * These numbers are related to the struct vs_map,
244 * defined below.
245 */
246#define MAX_CLUSTER_SIZE 8
247#define MAX_CLUSTER_SHIFT 3
248#define NO_CLSIZE 0
249
250/*
251 * bit map related macros
252 */
#define	NBBY		8	/* bits per byte XXX */
#define BYTEMASK	0xff
/*
 * Bit-array operations: "a" is a byte array, "i" a bit index.
 * Each macro evaluates "i" more than once -- no side effects there.
 */
#define setbit(a,i)	(*(((char *)(a)) + ((i)/NBBY)) |= 1<<((i)%NBBY))
#define clrbit(a,i)	(*(((char *)(a)) + ((i)/NBBY)) &= ~(1<<((i)%NBBY)))
#define isset(a,i)	(*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY)))
#define isclr(a,i)	((*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY))) == 0)
259
260/*
261 *	Default Pager.
262 *		Backing Store Management.
263 */
264
265#define BS_MAXPRI	4
266#define BS_MINPRI	0
267#define BS_NOPRI	-1
268#define BS_FULLPRI	-2
269
270/*
271 * Quick way to access the emergency segment backing store structures
272 * without a full-blown search.
273 */
274extern MACH_PORT_FACE		emergency_segment_backing_store;
275
276/*
277 * Mapping between backing store port and backing store object.
278 */
/*
 * One backing store (a priority-grouped unit of swap).  bs_lock
 * protects the structure; statistics are updated under it via
 * BS_STAT() (defined below).
 */
struct backing_store {
	queue_chain_t	bs_links;	/* link in backing_store_list */
	lck_mtx_t		bs_lock;	/* lock for the structure */
	MACH_PORT_FACE	bs_port;	/* backing store port */
	int		bs_priority;	/* priority (see BS_*PRI above) */
	int		bs_clsize;	/* cluster size in pages */

	/* statistics */
	unsigned int	bs_pages_free;		/* # unallocated pages */
	unsigned int	bs_pages_total;		/* # pages (total) */
	unsigned int	bs_pages_in;		/* # page read requests */
	unsigned int	bs_pages_in_fail;	/* # page read errors */
	unsigned int	bs_pages_out;		/* # page write requests */
	unsigned int	bs_pages_out_fail;	/* # page write errors */
};
294typedef struct backing_store 	*backing_store_t;
295#define	BACKING_STORE_NULL	((backing_store_t) 0)
296#define BS_STAT(bs, clause)	VSTATS_ACTION(&(bs)->bs_lock, (clause))
297
298#ifdef MACH_KERNEL
299#define BS_LOCK_INIT(bs)	lck_mtx_init(&(bs)->bs_lock, &default_pager_lck_grp, &default_pager_lck_attr)
300#define BS_LOCK_DESTROY(bs)	lck_mtx_destroy(&(bs)->bs_lock, &default_pager_lck_grp)
301#define BS_LOCK(bs)			lck_mtx_lock(&(bs)->bs_lock)
302#define BS_UNLOCK(bs)		lck_mtx_unlock(&(bs)->bs_lock)
303
304struct backing_store_list_head {
305	queue_head_t	bsl_queue;
306	lck_mtx_t 	bsl_lock;
307#endif
308};
309extern struct backing_store_list_head	backing_store_list;
310extern int	backing_store_release_trigger_disable;
311
312#define	BSL_LOCK_INIT()		lck_mtx_init(&backing_store_list.bsl_lock, &default_pager_lck_grp, &default_pager_lck_attr)
313#define	BSL_LOCK_DESTROY()	lck_mtx_destroy(&backing_store_list.bsl_lock, &default_pager_lck_grp)
314#define BSL_LOCK()			lck_mtx_lock(&backing_store_list.bsl_lock)
315#define BSL_UNLOCK()		lck_mtx_unlock(&backing_store_list.bsl_lock)
316
317/*
318 * 	Paging segment management.
319 * 	Controls allocation of blocks within paging area.
320 */
/*
 * One paging segment: a region of a device or a backing-store file,
 * carved into clusters whose usage is tracked in the ps_bmap bitmap.
 * ps_lock protects the contents of the structure.
 */
struct paging_segment {
	/* device management */
	union {
		MACH_PORT_FACE	dev;		/* Port to device */
	 	struct vnode	*vnode;		/* vnode for bs file */
	} storage_type;
	unsigned int	ps_segtype;	/* file type or partition */
	MACH_PORT_FACE	ps_device;	/* Port to device */
					/* NOTE(review): the "#define
					 * ps_device storage_type.dev" below
					 * remaps this name to the union
					 * member, so this slot appears to be
					 * unreachable by name -- confirm
					 * before relying on it. */
	dp_offset_t	ps_offset;	/* Offset of segment within device */
	dp_offset_t	ps_recnum;	/* Number of device records in segment*/
	unsigned int	ps_pgnum;	/* Number of pages in segment */
	unsigned int	ps_record_shift;/* Bit shift: pages to device records */

	/* clusters and pages */
	unsigned int	ps_clshift;	/* Bit shift: clusters to pages */
	unsigned int	ps_ncls;	/* Number of clusters in segment */
	unsigned int	ps_clcount;	/* Number of free clusters */
	unsigned int	ps_pgcount;	/* Number of free pages */
	unsigned int	ps_hint;	/* Hint of where to look next. */
	unsigned int	ps_special_clusters; /* Clusters that might come in while we've
					* released the locks doing a ps_delete.
					*/

	/* bitmap */
	lck_mtx_t		ps_lock;	/* Lock for contents of struct */
	unsigned char	*ps_bmap;	/* Map of used clusters */

	/* backing store */
	backing_store_t	ps_bs;		/* Backing store segment belongs to */
/* ps_state flag values (tested via the IS_PS_* macros below) */
#define	PS_CAN_USE		0x1
#define	PS_GOING_AWAY		0x2
#define PS_EMERGENCY_SEGMENT	0x4
	unsigned int	ps_state;
};
355
356#define IS_PS_OK_TO_USE(ps)		((ps->ps_state & PS_CAN_USE) == PS_CAN_USE)
357#define IS_PS_GOING_AWAY(ps)		((ps->ps_state & PS_GOING_AWAY) == PS_GOING_AWAY)
358#define IS_PS_EMERGENCY_SEGMENT(ps)	((ps->ps_state & PS_EMERGENCY_SEGMENT) == PS_EMERGENCY_SEGMENT)
359
360#define ps_vnode	storage_type.vnode
361#define ps_device	storage_type.dev
362#define PS_PARTITION 1
363#define PS_FILE	2
364
365typedef struct paging_segment *paging_segment_t;
366
367#define PAGING_SEGMENT_NULL	((paging_segment_t) 0)
368
369#define PS_LOCK_INIT(ps)	lck_mtx_init(&(ps)->ps_lock, &default_pager_lck_grp, &default_pager_lck_attr)
370#define PS_LOCK_DESTROY(ps)	lck_mtx_destroy(&(ps)->ps_lock, &default_pager_lck_grp)
371#define PS_LOCK(ps)			lck_mtx_lock(&(ps)->ps_lock)
372#define PS_UNLOCK(ps)		lck_mtx_unlock(&(ps)->ps_lock)
373
374typedef unsigned int	pseg_index_t;
375
376#define	INVALID_PSEG_INDEX	((pseg_index_t)-1)
377#define EMERGENCY_PSEG_INDEX		((pseg_index_t) 0)
378/*
379 * MAX_PSEG_INDEX value is related to struct vs_map below.
380 * "0" is reserved for empty map entries (no segment).
381 */
382#define MAX_PSEG_INDEX	63	/* 0 is reserved for empty map */
383#define MAX_NUM_PAGING_SEGMENTS MAX_PSEG_INDEX
384
385/* paging segments array */
386extern paging_segment_t	paging_segments[MAX_NUM_PAGING_SEGMENTS];
387extern lck_mtx_t paging_segments_lock;
388extern int	paging_segment_count;	/* number of active paging segments */
389extern int	paging_segment_max;	/* highest used paging segment index */
390extern int ps_select_array[DEFAULT_PAGER_BACKING_STORE_MAXPRI+1];
391
392#define	PSL_LOCK_INIT()		lck_mtx_init(&paging_segments_lock, &default_pager_lck_grp, &default_pager_lck_attr)
393#define	PSL_LOCK_DESTROY()	lck_mtx_destroy(&paging_segments_lock, &default_pager_lck_grp)
394#define PSL_LOCK()		lck_mtx_lock(&paging_segments_lock)
395#define PSL_UNLOCK()	lck_mtx_unlock(&paging_segments_lock)
396
397/*
398 * Vstruct manipulation.  The vstruct is the pager's internal
399 * representation of vm objects it manages.  There is one vstruct allocated
400 * per vm object.
401 *
402 * The following data structures are defined for vstruct and vm object
403 * management.
404 */
405
406/*
407 * vs_map
408 * A structure used only for temporary objects.  It is the element
409 * contained in the vs_clmap structure, which contains information
410 * about which clusters and pages in an object are present on backing
411 * store (a paging file).
412 * Note that this structure and its associated constants may change
413 * with minimal impact on code.  The only function which knows the
414 * internals of this structure is ps_clmap().
415 *
416 * If it is necessary to change the maximum number of paging segments
417 * or pages in a cluster, then this structure is the one most
418 * affected.   The constants and structures which *may* change are:
419 *	MAX_CLUSTER_SIZE
420 *	MAX_CLUSTER_SHIFT
421 *	MAX_NUM_PAGING_SEGMENTS
422 *	VSTRUCT_DEF_CLSHIFT
423 *	struct vs_map and associated macros and constants (VSM_*)
424 *	  (only the macro definitions need change, the exported (inside the
425 *	   pager only) interfaces remain the same; the constants are for
426 *	   internal vs_map manipulation only).
427 *	struct clbmap (below).
428 */
/*
 * One cluster's mapping to backing store.  The field widths are
 * load-bearing: vsmap_entry:23 matches VSM_ENTRY_NULL (0x7fffff, all
 * ones), vsmap_psindex:8 must hold up to MAX_PSEG_INDEX, and the
 * 16-bit bmap/alloc fields bound the pages representable per cluster.
 * Manipulate only through the VSM_* macros below.
 */
struct vs_map {
	unsigned int	vsmap_entry:23,		/* offset in paging segment */
			vsmap_psindex:8,	/* paging segment */
			vsmap_error:1,		/* error on this cluster (see VSM_SETERR) */
			vsmap_bmap:16,		/* per-page bitmap (VSM_SETPG/VSM_CLRPG) */
			vsmap_alloc:16;		/* per-page alloc bitmap (VSM_SETALLOC) */
};
436
437typedef struct vs_map *vs_map_t;
438
439
440#define	VSM_ENTRY_NULL	0x7fffff
441
442/*
443 * Exported macros for manipulating the vs_map structure --
444 * checking status, getting and setting bits.
445 */
446#define	VSCLSIZE(vs)		(1U << (vs)->vs_clshift)
447#define	VSM_ISCLR(vsm)		(((vsm).vsmap_entry == VSM_ENTRY_NULL) &&   \
448					((vsm).vsmap_error == 0))
449#define	VSM_ISERR(vsm)		((vsm).vsmap_error)
450#define	VSM_SETCLOFF(vsm, val)	((vsm).vsmap_entry = (val))
451#define	VSM_SETERR(vsm, err)	((vsm).vsmap_error = 1,   \
452					(vsm).vsmap_entry = (err))
453#define	VSM_GETERR(vsm)		((vsm).vsmap_entry)
454#define	VSM_SETPG(vsm, page)	((vsm).vsmap_bmap |= (1 << (page)))
455#define	VSM_CLRPG(vsm, page)	((vsm).vsmap_bmap &= ~(1 << (page)))
456#define	VSM_SETPS(vsm, psindx)	((vsm).vsmap_psindex = (psindx))
457#define	VSM_PSINDEX(vsm)	((vsm).vsmap_psindex)
458#define	VSM_PS(vsm)		paging_segments[(vsm).vsmap_psindex]
459#define	VSM_BMAP(vsm)		((vsm).vsmap_bmap)
460#define	VSM_CLOFF(vsm)		((vsm).vsmap_entry)
461#define	VSM_CLR(vsm)		((vsm).vsmap_entry = VSM_ENTRY_NULL,   \
462					(vsm).vsmap_psindex = 0,   \
463					(vsm).vsmap_error = 0,	   \
464					(vsm).vsmap_bmap = 0,	   \
465					(vsm).vsmap_alloc = 0)
466#define	VSM_ALLOC(vsm)		((vsm).vsmap_alloc)
467#define	VSM_SETALLOC(vsm, page)	((vsm).vsmap_alloc |= (1 << (page)))
468#define	VSM_CLRALLOC(vsm, page)	((vsm).vsmap_alloc &= ~(1 << (page)))
469
470/*
471 * Constants and macros for dealing with vstruct maps,
472 * which comprise vs_map structures, which
473 * map vm objects to backing storage (paging files and clusters).
474 */
475#define CLMAP_THRESHOLD	512 	/* bytes */
476#define	CLMAP_ENTRIES		(CLMAP_THRESHOLD/(int)sizeof(struct vs_map))
477#define	CLMAP_SIZE(ncls)	(ncls*(int)sizeof(struct vs_map))
478
479#define	INDIRECT_CLMAP_ENTRIES(ncls) (((ncls-1)/CLMAP_ENTRIES) + 1)
480#define INDIRECT_CLMAP_SIZE(ncls) (INDIRECT_CLMAP_ENTRIES(ncls) * (int)sizeof(struct vs_map *))
481#define INDIRECT_CLMAP(size)	(CLMAP_SIZE(size) > CLMAP_THRESHOLD)
482
483#define RMAPSIZE(blocks) 	(howmany(blocks,NBBY))
484
485#define CL_FIND 1
486#define CL_ALLOC 2
487
488/*
489 * clmap
490 *
491 * A cluster map returned by ps_clmap.  It is an abstracted cluster of
492 * pages.  It gives the caller information about the cluster
493 * desired.  On read it tells the caller if a cluster is mapped, and if so,
494 * which of its pages are valid.  It should not be referenced directly,
495 * except by  ps_clmap; macros should be used.  If the number of pages
496 * in a cluster needs to be more than 32, then the struct clbmap must
497 * become larger.
498 */
/* Bitmap of pages within one cluster (at most 32 -- see comment above). */
struct clbmap {
	unsigned int	clb_map;
};

/* Result of a ps_clmap() call; access only via the CLMAP_* macros below. */
struct clmap {
	paging_segment_t cl_ps;		/* paging segment backing cluster */
	int		cl_numpages;	/* number of valid pages */
	struct clbmap	cl_bmap;	/* map of pages in cluster */
	int		cl_error;	/* cluster error value */
	struct clbmap	cl_alloc;	/* map of allocated pages in cluster */
};
510
511#define  CLMAP_ERROR(clm)	(clm).cl_error
512#define  CLMAP_PS(clm)		(clm).cl_ps
513#define  CLMAP_NPGS(clm)	(clm).cl_numpages
514#define	 CLMAP_ISSET(clm,i)	((1<<(i))&((clm).cl_bmap.clb_map))
515#define  CLMAP_ALLOC(clm)	(clm).cl_alloc.clb_map
516/*
517 * Shift off unused bits in a partial cluster
518 */
519#define  CLMAP_SHIFT(clm,vs)	\
520	(clm)->cl_bmap.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages)
521#define  CLMAP_SHIFTALLOC(clm,vs)	\
522	(clm)->cl_alloc.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages)
523
/* Pairs a pager-ops pointer with its owning vstruct. */
typedef struct vstruct_alias {
	memory_object_pager_ops_t name;
	struct vstruct *vs;
} vstruct_alias_t;

/* Lock for the dpt array (see dpt_lock above). */
#define DPT_LOCK_INIT(lock)		lck_mtx_init(&(lock), &default_pager_lck_grp, &default_pager_lck_attr)
#define DPT_LOCK_DESTROY(lock)		lck_mtx_destroy(&(lock), &default_pager_lck_grp)
#define DPT_LOCK(lock)			lck_mtx_lock(&(lock))
#define DPT_UNLOCK(lock)		lck_mtx_unlock(&(lock))
#define DPT_SLEEP(lock, e, i)	lck_mtx_sleep(&(lock), LCK_SLEEP_DEFAULT, (event_t)(e), i)
/*
 * Per-vstruct state lock: a raw hw spin lock, so VS_TRY_LOCK can
 * never fail -- it simply takes the lock and yields TRUE.
 */
#define VS_LOCK_TYPE			hw_lock_data_t
#define VS_LOCK_INIT(vs)		hw_lock_init(&(vs)->vs_lock)
#define VS_TRY_LOCK(vs)			(VS_LOCK(vs),TRUE)
#define VS_LOCK(vs)				hw_lock_lock(&(vs)->vs_lock)
#define VS_UNLOCK(vs)			hw_lock_unlock(&(vs)->vs_lock)
/* Mutex protecting a vstruct's cluster map (vs_dmap/vs_imap). */
#define VS_MAP_LOCK_TYPE		lck_mtx_t
#define VS_MAP_LOCK_INIT(vs)	lck_mtx_init(&(vs)->vs_map_lock, &default_pager_lck_grp, &default_pager_lck_attr)
#define VS_MAP_LOCK_DESTROY(vs)	lck_mtx_destroy(&(vs)->vs_map_lock, &default_pager_lck_grp)
#define VS_MAP_LOCK(vs)			lck_mtx_lock(&(vs)->vs_map_lock)
#define VS_MAP_TRY_LOCK(vs)		lck_mtx_try_lock(&(vs)->vs_map_lock)
#define VS_MAP_UNLOCK(vs)		lck_mtx_unlock(&(vs)->vs_map_lock)
545
546
547/*
548 * VM Object Structure:  This is the structure used to manage
549 * default pager object associations with their control counter-
550 * parts (VM objects).
551 *
552 * The start of this structure MUST match a "struct memory_object".
553 */
/*
 * Pager-side representation of one VM object (see the comment above:
 * the leading fields must match "struct memory_object").
 */
typedef struct vstruct {
	struct ipc_object_header	vs_pager_header;	/* fake ip_kotype() */
	memory_object_pager_ops_t vs_pager_ops; /* == &default_pager_ops */
	memory_object_control_t vs_control;	/* our mem obj control ref */
	VS_LOCK_TYPE		vs_lock;	/* data for the lock */

	/* JMM - Could combine these first two in a single pending count now */
	unsigned int		vs_next_seqno;	/* next sequence num to issue */
	unsigned int		vs_seqno;	/* Pager port sequence number */
	unsigned int		vs_readers;	/* Reads in progress */
	unsigned int		vs_writers;	/* Writes in progress */

	unsigned int
	/* boolean_t */		vs_waiting_seqno:1,	/* to wait on seqno */
	/* boolean_t */		vs_waiting_read:1, 	/* waiting on reader? */
	/* boolean_t */		vs_waiting_write:1,	/* waiting on writer? */
	/* boolean_t */		vs_waiting_async:1,	/* waiting on async? */
	/* boolean_t */		vs_indirect:1,		/* map indirect? */
	/* boolean_t */		vs_xfer_pending:1;	/* xfer out of seg? */

	unsigned int		vs_async_pending;/* pending async write count */
	unsigned int		vs_errors;	/* Pageout error count */
	unsigned int		vs_references;	/* references */

	queue_chain_t		vs_links;	/* Link in pager-wide list */

	unsigned int		vs_clshift;	/* Bit shift: clusters->pages */
	unsigned int		vs_size;	/* Object size in clusters */
	lck_mtx_t		vs_map_lock;	/* to protect map below */
	union {
		struct vs_map	*vsu_dmap;	/* Direct map of clusters */
		struct vs_map	**vsu_imap;	/* Indirect map of clusters */
	} vs_un;			/* active arm selected by vs_indirect */
} *vstruct_t;
588
589#define vs_dmap vs_un.vsu_dmap
590#define vs_imap vs_un.vsu_imap
591
592#define VSTRUCT_NULL	((vstruct_t) 0)
593
594__private_extern__ void vs_async_wait(vstruct_t);
595
596#if PARALLEL
597__private_extern__ void vs_lock(vstruct_t);
598__private_extern__ void vs_unlock(vstruct_t);
599__private_extern__ void vs_start_read(vstruct_t);
600__private_extern__ void vs_finish_read(vstruct_t);
601__private_extern__ void vs_wait_for_readers(vstruct_t);
602__private_extern__ void vs_start_write(vstruct_t);
603__private_extern__ void vs_finish_write(vstruct_t);
604__private_extern__ void vs_wait_for_writers(vstruct_t);
605__private_extern__ void vs_wait_for_sync_writers(vstruct_t);
606#else	/* PARALLEL */
607#define	vs_lock(vs)
608#define	vs_unlock(vs)
609#define	vs_start_read(vs)
610#define	vs_wait_for_readers(vs)
611#define	vs_finish_read(vs)
612#define	vs_start_write(vs)
613#define	vs_wait_for_writers(vs)
614#define	vs_wait_for_sync_writers(vs)
615#define	vs_finish_write(vs)
616#endif /* PARALLEL */
617
618/*
619 * Data structures and variables dealing with asynchronous
620 * completion of paging operations.
621 */
622/*
623 * vs_async
624 * 	A structure passed to ps_write_device for asynchronous completions.
625 * 	It contains enough information to complete the write and
626 *	inform the VM of its completion.
627 */
/* Completion record for one asynchronous device write (see above). */
struct vs_async {
	struct vs_async	*vsa_next;	/* pointer to next structure */
	vstruct_t	vsa_vs;		/* the vstruct for the object */
	vm_offset_t	vsa_addr;	/* the vaddr of the data moved */
	vm_offset_t	vsa_offset;	/* the object offset of the data */
	vm_size_t	vsa_size;	/* the number of bytes moved */
	paging_segment_t vsa_ps;	/* the paging segment used */
	int		vsa_flags;	/* flags (VSA_READ/VSA_WRITE/VSA_TRANSFER) */
	int		vsa_error;	/* error, if there is one */
	MACH_PORT_FACE	reply_port;	/* associated reply port */
};
639
640/*
641 * flags values.
642 */
643#define VSA_READ	0x0001
644#define VSA_WRITE	0x0002
645#define VSA_TRANSFER	0x0004
646
647/*
648 * List of all vstructs.  A specific vstruct is
649 * found directly via its port, this list is
650 * only used for monitoring purposes by the
651 * default_pager_object* calls
652 */
/* Head of the monitoring list of all vstructs (see comment above). */
struct vstruct_list_head {
	queue_head_t	vsl_queue;	/* all vstructs */
	lck_mtx_t		vsl_lock;	/* guards queue and count (VSL_* macros) */
	int		vsl_count;	/* saves code */
};
658
659extern struct vstruct_list_head	vstruct_list;
660
661__private_extern__ void vstruct_list_insert(vstruct_t vs);
662__private_extern__ void vstruct_list_delete(vstruct_t vs);
663
664
665extern lck_grp_t		default_pager_lck_grp;
666extern lck_attr_t		default_pager_lck_attr;
667
668#define VSL_LOCK_INIT()		lck_mtx_init(&vstruct_list.vsl_lock, &default_pager_lck_grp, &default_pager_lck_attr)
669#define VSL_LOCK_DESTROY()	lck_mtx_destroy(&vstruct_list.vsl_lock, &default_pager_lck_grp)
670#define VSL_LOCK()			lck_mtx_lock(&vstruct_list.vsl_lock)
671#define VSL_LOCK_TRY()		lck_mtx_try_lock(&vstruct_list.vsl_lock)
672#define VSL_UNLOCK()		lck_mtx_unlock(&vstruct_list.vsl_lock)
673#define VSL_SLEEP(e,i)		lck_mtx_sleep(&vstruct_list.vsl_lock, LCK_SLEEP_DEFAULT, (e), (i))
674
675#ifdef MACH_KERNEL
676extern zone_t	vstruct_zone;
677#endif
678
679/*
680 * Create port alias for vstruct address.
681 *
682 * We assume that the last two bits of a vstruct address will be zero due to
683 * memory allocation restrictions, hence are available for use as a sanity
684 * check.
685 */
686#ifdef MACH_KERNEL
687
688extern const struct memory_object_pager_ops default_pager_ops;
689
690#define mem_obj_is_vs(_mem_obj_)					\
691	(((_mem_obj_) != NULL) &&					\
692	 ((_mem_obj_)->mo_pager_ops == &default_pager_ops))
693#define mem_obj_to_vs(_mem_obj_)					\
694	((vstruct_t)(_mem_obj_))
695#define vs_to_mem_obj(_vs_) ((memory_object_t)(_vs_))
696#define vs_lookup(_mem_obj_, _vs_)					\
697	do {								\
698	if (!mem_obj_is_vs(_mem_obj_))					\
699		panic("bad dp memory object");				\
700	_vs_ = mem_obj_to_vs(_mem_obj_);				\
701	} while (0)
702#define vs_lookup_safe(_mem_obj_, _vs_)					\
703	do {								\
704	if (!mem_obj_is_vs(_mem_obj_))					\
705		_vs_ = VSTRUCT_NULL;					\
706	else								\
707		_vs_ = mem_obj_to_vs(_mem_obj_);			\
708	} while (0)
709#else
710
711#define	vs_to_port(_vs_)	(((vm_offset_t)(_vs_))+1)
712#define	port_to_vs(_port_)	((vstruct_t)(((vm_offset_t)(_port_))&~3))
713#define port_is_vs(_port_)	((((vm_offset_t)(_port_))&3) == 1)
714
715#define vs_lookup(_port_, _vs_)						\
716	do {								\
717		if (!MACH_PORT_VALID(_port_) || !port_is_vs(_port_)	\
718		    || port_to_vs(_port_)->vs_mem_obj != (_port_))	\
719			Panic("bad pager port");			\
720		_vs_ = port_to_vs(_port_);				\
721	} while (0)
722#endif
723
724/*
725 * Cross-module routines declaration.
726 */
727#ifndef MACH_KERNEL
728extern int		dp_thread_id(void);
729#endif
730extern boolean_t	device_reply_server(mach_msg_header_t *,
731					    mach_msg_header_t *);
732#ifdef MACH_KERNEL
733extern boolean_t	default_pager_no_senders(memory_object_t,
734						 mach_port_mscount_t);
735#else
736extern void		default_pager_no_senders(memory_object_t,
737						 mach_port_seqno_t,
738						 mach_port_mscount_t);
739#endif
740
741extern int		local_log2(unsigned int);
742extern void		bs_initialize(void);
743extern void		bs_global_info(uint64_t *,
744				       uint64_t *);
745extern boolean_t	bs_add_device(char *,
746				      MACH_PORT_FACE);
747extern vstruct_t	ps_vstruct_create(dp_size_t);
748extern void		ps_vstruct_dealloc(vstruct_t);
749extern kern_return_t	ps_vstruct_reclaim(vstruct_t,
750					   boolean_t,
751					   boolean_t);
752extern kern_return_t	pvs_cluster_read(vstruct_t,
753					 dp_offset_t,
754					 dp_size_t,
755					 void *);
756extern kern_return_t	vs_cluster_write(vstruct_t,
757					 upl_t,
758					 upl_offset_t,
759					 upl_size_t,
760					 boolean_t,
761					 int);
762extern dp_offset_t	ps_clmap(vstruct_t,
763				 dp_offset_t,
764				 struct clmap *,
765				 int,
766				 dp_size_t,
767				 int);
768extern vm_size_t	ps_vstruct_allocated_size(vstruct_t);
769extern unsigned int	ps_vstruct_allocated_pages(vstruct_t,
770						   default_pager_page_t *,
771						   unsigned int);
772extern boolean_t	bs_set_default_clsize(unsigned int);
773
774extern boolean_t	verbose;
775
776extern thread_call_t	default_pager_backing_store_monitor_callout;
777extern void		default_pager_backing_store_monitor(thread_call_param_t, thread_call_param_t);
778
779extern ipc_port_t	max_pages_trigger_port;
780extern unsigned int	dp_pages_free;
781extern unsigned int	maximum_pages_free;
782
783/* Do we know yet if swap files need to be encrypted ? */
784extern boolean_t	dp_encryption_inited;
785/* Should we encrypt data before writing to swap ? */
786extern boolean_t	dp_encryption;
787
788extern boolean_t	dp_isssd;
789
790#endif	/* _DEFAULT_PAGER_INTERNAL_H_ */
791