/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	zalloc.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	 1985
 *
 */

#ifdef	KERNEL_PRIVATE

#ifndef	_KERN_ZALLOC_H_
#define _KERN_ZALLOC_H_

#include <mach/machine/vm_types.h>
#include <kern/kern_types.h>
#include <sys/cdefs.h>

#ifdef	MACH_KERNEL_PRIVATE

#include <zone_debug.h>
#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/queue.h>
#include <kern/thread_call.h>

#if	CONFIG_GZALLOC
typedef struct gzalloc_data {
	uint32_t	gzfc_index;
	vm_offset_t 	*gzfc;
} gzalloc_data_t;
#endif

/*
 *	A zone is a collection of fixed size blocks for which there
 *	is fast allocation/deallocation access.  Kernel routines can
 *	use zones to manage data structures dynamically, creating a zone
 *	for each type of data structure to be managed.
 *
 */
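
/*
 * Typical usage sketch (illustrative only; "struct widget", widget_zone and
 * the helper names below are hypothetical, not part of this interface).  A
 * subsystem creates one zone per data structure type with zinit(), then
 * allocates and frees elements with zalloc()/zfree():
 *
 *	static zone_t widget_zone;
 *
 *	void
 *	widget_zone_setup(void)
 *	{
 *		widget_zone = zinit(sizeof (struct widget),		// element size
 *				    1024 * sizeof (struct widget),	// max memory for the zone
 *				    PAGE_SIZE,				// chunk size used to grow the zone
 *				    "widgets");				// name reported in zone listings
 *	}
 *
 *	struct widget *
 *	widget_alloc(void)
 *	{
 *		return (struct widget *)zalloc(widget_zone);
 *	}
 *
 *	void
 *	widget_free(struct widget *w)
 *	{
 *		zfree(widget_zone, w);
 *	}
 */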

struct zone {
	int		count;		/* Number of elements used now */
	vm_offset_t	free_elements;	/* free elements list */
	decl_lck_mtx_data(,lock)	/* zone lock */
	lck_mtx_ext_t   lock_ext;	/* placeholder for indirect mutex */
	lck_attr_t      lock_attr;	/* zone lock attribute */
	lck_grp_t       lock_grp;	/* zone lock group */
	lck_grp_attr_t  lock_grp_attr;	/* zone lock group attribute */
	vm_size_t	cur_size;	/* current memory utilization */
	vm_size_t	max_size;	/* how large can this zone grow */
	vm_size_t	elem_size;	/* size of an element */
	vm_size_t	alloc_size;	/* size used for more memory */
	uint64_t	sum_count;	/* count of allocs (life of zone) */
	unsigned int
	/* boolean_t */ exhaustible :1,	/* (F) merely return if empty? */
	/* boolean_t */	collectable :1,	/* (F) garbage collect empty pages */
	/* boolean_t */	expandable :1,	/* (T) expand zone (with message)? */
	/* boolean_t */ allows_foreign :1,/* (F) allow non-zalloc space */
	/* boolean_t */	doing_alloc :1,	/* is zone expanding now? */
	/* boolean_t */	waiting :1,	/* is thread waiting for expansion? */
	/* boolean_t */	async_pending :1,	/* asynchronous allocation pending? */
#if CONFIG_ZLEAKS
	/* boolean_t */ zleak_on :1,	/* Are we collecting allocation information? */
#endif	/* CONFIG_ZLEAKS */
	/* boolean_t */	caller_acct :1,	/* do we account allocation/free to the caller? */
	/* boolean_t */	doing_gc :1,	/* garbage collect in progress? */
	/* boolean_t */ noencrypt :1,	/* don't encrypt contents during hibernation (see Z_NOENCRYPT) */
	/* boolean_t */	no_callout :1,	/* don't replenish asynchronously via callouts (see Z_NOCALLOUT) */
	/* boolean_t */	async_prio_refill :1,	/* refilled by a dedicated replenish thread */
	/* boolean_t */	gzalloc_exempt :1,	/* not tracked in guard allocation mode (see Z_GZALLOC_EXEMPT) */
	/* boolean_t */	alignment_required :1;	/* element alignment must be preserved (see Z_ALIGNMENT_REQUIRED) */
	int		index;		/* index into zone_info arrays for this zone */
	struct zone *	next_zone;	/* Link for all-zones list */
	thread_call_data_t call_async_alloc;	/* callout for asynchronous alloc */
	const char	*zone_name;	/* a name for the zone */
#if	ZONE_DEBUG
	queue_head_t	active_zones;	/* active elements */
#endif	/* ZONE_DEBUG */

#if CONFIG_ZLEAKS
	uint32_t num_allocs;		/* alloc stats for zleak benchmarks */
	uint32_t num_frees;		/* free stats for zleak benchmarks */
	uint32_t zleak_capture;		/* per-zone counter for capturing every N allocations */
#endif /* CONFIG_ZLEAKS */
	uint32_t free_check_count;	/* counter for poisoning/checking every N frees */
	vm_size_t	prio_refill_watermark;	/* refill when the free count drops below this */
	thread_t	zone_replenish_thread;	/* thread that asynchronously refills this zone */
#if	CONFIG_GZALLOC
	gzalloc_data_t	gz;		/* guard allocation state */
#endif /* CONFIG_GZALLOC */
};

/*
 *	Structure for tracking zone usage.
 *	Used either once per task/thread for all zones, or once per <task, zone> pair.
 */
typedef struct zinfo_usage_store_t {
	/* These fields may be updated atomically, and so must be 8 byte aligned */
	uint64_t	alloc __attribute__((aligned(8)));		/* allocation counter */
	uint64_t	free __attribute__((aligned(8)));		/* free counter */
} zinfo_usage_store_t;
typedef zinfo_usage_store_t *zinfo_usage_t;
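
/*
 * Update sketch: these counters are meant to be bumped with 8-byte atomic
 * operations (hence the aligned(8) above).  A minimal illustration, assuming
 * a zinfo_usage_t array indexed by a zone's index field (the `usage` and `z`
 * names are hypothetical):
 *
 *	#include <libkern/OSAtomic.h>
 *
 *	zinfo_usage_t	usage;	// one zinfo_usage_store_t per zone
 *	zone_t		z;
 *
 *	OSAddAtomic64(z->elem_size, (int64_t *)&usage[z->index].alloc);	// on allocation
 *	OSAddAtomic64(z->elem_size, (int64_t *)&usage[z->index].free);	// on free
 */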

extern void		zone_gc(boolean_t);
extern void		consider_zone_gc(boolean_t);

/* Steal memory for zone module */
extern void		zone_steal_memory(void);

/* Bootstrap zone module (create zone zone) */
extern void		zone_bootstrap(void) __attribute__((section("__TEXT, initcode")));

/* Init zone module */
extern void		zone_init(
					vm_size_t	map_size) __attribute__((section("__TEXT, initcode")));

/* Handle per-task zone info */
extern void		zinfo_task_init(task_t task);
extern void		zinfo_task_free(task_t task);


/* Stack use statistics */
extern void		stack_fake_zone_init(int zone_index);
extern void		stack_fake_zone_info(
					int			*count,
					vm_size_t	*cur_size,
					vm_size_t	*max_size,
					vm_size_t	*elem_size,
					vm_size_t	*alloc_size,
					uint64_t	*sum_size,
					int			*collectable,
					int			*exhaustable,
					int		*caller_acct);

#if		ZONE_DEBUG

extern void		zone_debug_enable(
				zone_t		z);

extern void		zone_debug_disable(
				zone_t		z);

#define zone_debug_enabled(z) z->active_zones.next
#define	ROUNDUP(x,y)		((((x)+(y)-1)/(y))*(y))
#define ZONE_DEBUG_OFFSET	ROUNDUP(sizeof(queue_chain_t),16)
#endif	/* ZONE_DEBUG */

#endif	/* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef	XNU_KERNEL_PRIVATE

/* Allocate from zone */
extern void *	zalloc(
					zone_t		zone);

/* Free zone element */
extern void		zfree(
					zone_t		zone,
					void 		*elem);

/* Create zone */
extern zone_t	zinit(
					vm_size_t	size,		/* the size of an element */
					vm_size_t	maxmem,		/* maximum memory to use */
					vm_size_t	alloc,		/* allocation size */
					const char	*name);		/* a name for the zone */


/* Non-blocking version of zalloc */
extern void *	zalloc_noblock(
					zone_t		zone);

/* Direct (non-wrapped) interface */
extern void *	zalloc_canblock(
					zone_t		zone,
					boolean_t	canblock);
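
/*
 * Non-blocking sketch (illustrative; my_zone is hypothetical).  zalloc() and
 * zalloc_noblock() are thin wrappers around zalloc_canblock(); the
 * non-blocking path may return NULL when the zone would have to block to
 * expand, so callers must check the result:
 *
 *	void *elem = zalloc_noblock(my_zone);
 *	if (elem == NULL) {
 *		// out of elements and not allowed to wait; retry later from
 *		// a context that can block, or fail the operation
 *	}
 */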

/* Get from zone free list */
extern void *	zget(
					zone_t		zone);

/* Fill zone with memory */
extern void		zcram(
					zone_t		zone,
					vm_offset_t	newmem,
					vm_size_t	size);

/* Initially fill zone with specified number of elements */
extern int		zfill(
					zone_t		zone,
					int			nelem);
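
/*
 * Priming sketch (illustrative; widget_zone is hypothetical).  A subsystem
 * that needs elements available before its first allocation can pre-fill
 * the zone at initialization time:
 *
 *	(void) zfill(widget_zone, 64);		// pre-populate 64 elements
 */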

/* Change zone parameters */
extern void		zone_change(
					zone_t			zone,
					unsigned int	item,
					boolean_t		value);

/* Enable asynchronous (priority) refill for the zone, with the given low-water mark */
extern void		zone_prio_refill_configure(zone_t, vm_size_t);

/* Item definitions */
#define Z_EXHAUST	1	/* Make zone exhaustible	*/
#define Z_COLLECT	2	/* Make zone collectable	*/
#define Z_EXPAND	3	/* Make zone expandable		*/
#define	Z_FOREIGN	4	/* Allow collectable zone to contain foreign elements */
#define Z_CALLERACCT	5	/* Account alloc/free against the caller */
#define Z_NOENCRYPT	6	/* Don't encrypt zone during hibernation */
#define Z_NOCALLOUT 	7	/* Don't asynchronously replenish the zone via
				 * callouts
				 */
#define Z_ALIGNMENT_REQUIRED 8	/* Preserve element alignment */
#define Z_GZALLOC_EXEMPT 9	/* Not tracked in guard allocation mode */
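
/*
 * Configuration sketch (illustrative; widget_zone is hypothetical).  Zones
 * are typically tuned with zone_change() right after zinit():
 *
 *	zone_change(widget_zone, Z_NOENCRYPT, TRUE);	// skip hibernation encryption
 *	zone_change(widget_zone, Z_CALLERACCT, FALSE);	// don't bill allocations to the caller
 *	zone_change(widget_zone, Z_EXPAND, TRUE);	// allow the zone to grow on demand
 */
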
/* Preallocate space for zone from zone map */
extern void		zprealloc(
					zone_t		zone,
					vm_size_t	size);

/* Return the number of free elements currently in the zone */
extern integer_t	zone_free_count(
						zone_t		zone);

/*
 * MAX_ZTRACE_DEPTH configures how deep a stack trace is captured on each
 * zalloc in the zone of interest.  Fifteen levels is usually enough to get
 * past all the layers of code in kalloc and IOKit and see who the actual
 * caller is above these lower levels.
 *
 * This is used both for the zone leak detector and the zone corruption log.
 */

#define MAX_ZTRACE_DEPTH	15

/*
 *  Structure for keeping track of a backtrace, used for leak detection.
 *  This is in the .h file because it is used during panic; see kern/debug.c.
 *  A non-zero size indicates that the trace is in use.
 */
struct ztrace {
	vm_size_t		zt_size;			/* total memory of the outstanding allocations that refer to this trace */
	uint32_t		zt_depth;			/* depth of stack (0 to MAX_ZTRACE_DEPTH) */
	void*			zt_stack[MAX_ZTRACE_DEPTH];	/* series of return addresses from OSBacktrace */
	uint32_t		zt_collisions;			/* number of times a different stack hashed here while this slot was occupied */
	uint32_t		zt_hit_count;			/* for determining effectiveness of hash function */
};

#if CONFIG_ZLEAKS

/* support for the kern.zleak.* sysctls */

extern kern_return_t zleak_activate(void);
extern vm_size_t zleak_max_zonemap_size;
extern vm_size_t zleak_global_tracking_threshold;
extern vm_size_t zleak_per_zone_tracking_threshold;

extern int get_zleak_state(void);

#endif	/* CONFIG_ZLEAKS */

/* These functions are used for leak detection in both zalloc.c and mbuf.c */
extern uint32_t fastbacktrace(uintptr_t* bt, uint32_t max_frames) __attribute__((noinline));
extern uintptr_t hash_mix(uintptr_t);
extern uint32_t hashbacktrace(uintptr_t *, uint32_t, uint32_t);
extern uint32_t hashaddr(uintptr_t, uint32_t);
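
/*
 * Leak-detection sketch (illustrative): capture the backtrace for an
 * allocation and hash it to an index in a table of struct ztrace entries.
 * trace_table and TABLE_SIZE are hypothetical; the exact convention for the
 * third hashbacktrace() argument is defined in zalloc.c:
 *
 *	uintptr_t	bt[MAX_ZTRACE_DEPTH];
 *	uint32_t	depth, idx;
 *
 *	depth = fastbacktrace(bt, MAX_ZTRACE_DEPTH);	// walk the stack
 *	idx = hashbacktrace(bt, depth, TABLE_SIZE);	// map the stack to a slot
 *	// trace_table[idx] is the struct ztrace bucket for this stack
 */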

#define lock_zone(zone)					\
MACRO_BEGIN						\
	lck_mtx_lock_spin(&(zone)->lock);		\
MACRO_END

#define unlock_zone(zone)				\
MACRO_BEGIN						\
	lck_mtx_unlock(&(zone)->lock);			\
MACRO_END
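
/*
 * Locking sketch (illustrative; z is hypothetical).  lock_zone()/unlock_zone()
 * bracket any direct manipulation of a zone's fields; the lock is taken in
 * spin mode, so critical sections should be short and must not block:
 *
 *	lock_zone(z);
 *	// ... examine or update z->count, z->cur_size, etc. ...
 *	unlock_zone(z);
 */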

#if	CONFIG_GZALLOC
void gzalloc_init(vm_size_t);
void gzalloc_zone_init(zone_t);
void gzalloc_configure(void);
void gzalloc_reconfigure(zone_t);
boolean_t gzalloc_enabled(void);

vm_offset_t gzalloc_alloc(zone_t, boolean_t);
boolean_t gzalloc_free(zone_t, void *);
#endif /* CONFIG_GZALLOC */

#endif	/* XNU_KERNEL_PRIVATE */

__END_DECLS

#endif	/* _KERN_ZALLOC_H_ */

#endif	/* KERNEL_PRIVATE */