/*
 * Copyright (c) 2006-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _SYS_MCACHE_H
#define	_SYS_MCACHE_H

#ifdef KERNEL_PRIVATE

#ifdef  __cplusplus
extern "C" {
#endif

#include <sys/types.h>
#include <sys/queue.h>
#include <mach/boolean.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>

#ifdef ASSERT
#undef ASSERT
#endif

#ifdef VERIFY
#undef VERIFY
#endif

/*
 * Unlike VERIFY(), ASSERT() is evaluated only in DEBUG builds.
 */
#define	VERIFY(EX)	((void)((EX) || assfail(#EX, __FILE__, __LINE__)))
#if DEBUG
#define	ASSERT(EX)	VERIFY(EX)
#else
#define	ASSERT(EX)	((void)0)
#endif
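
/*
 * Example (illustrative sketch; `m' is a hypothetical pointer):
 *
 *	VERIFY(m != NULL);	(checked in all builds)
 *	ASSERT(m != NULL);	(checked only in DEBUG builds)
 *
 * A failed expression panics via assfail() with the expression text,
 * file and line number; see its declaration at the bottom of this
 * header.
 */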

/*
 * Compile-time assert; this should be on its own someday.
 */
#define	_CASSERT(x)	\
	switch (0) { case 0: case (x): ; }
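
/*
 * Example (illustrative only): a false expression duplicates the
 * `case 0:' label inside the switch and fails to compile, so the
 * assertion costs nothing at run time.
 *
 *	_CASSERT(sizeof (mcache_obj_t) == sizeof (void *));
 */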

/*
 * Atomic macros; these should be on their own someday.
 */
#define	atomic_add_16_ov(a, n)						\
	((u_int16_t) OSAddAtomic16(n, (volatile SInt16 *)a))

#define	atomic_add_16(a, n)						\
	((void) atomic_add_16_ov(a, n))

#define	atomic_add_32_ov(a, n)						\
	((u_int32_t) OSAddAtomic(n, (volatile SInt32 *)a))

#define	atomic_add_32(a, n)						\
	((void) atomic_add_32_ov(a, n))

#define	atomic_add_64_ov(a, n)						\
	((u_int64_t) OSAddAtomic64(n, (volatile SInt64 *)a))

#define	atomic_add_64(a, n)						\
	((void) atomic_add_64_ov(a, n))

#define	atomic_set_64(a, n) do {					\
	while (!OSCompareAndSwap64(*a, n, (volatile UInt64 *)a))	\
		;							\
} while (0)

#if defined(__LP64__)
#define	atomic_get_64(n, a) do {					\
	(n) = *(a);							\
} while (0)
#else
#define	atomic_get_64(n, a) do {					\
	(n) = atomic_add_64_ov(a, 0);					\
} while (0)
#endif /* __LP64__ */
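
/*
 * Example (illustrative sketch; `cnt' and `total' are hypothetical
 * counters).  The _ov variants return the value the counter held
 * before the add; atomic_get_64() degrades to an atomic add of zero
 * on 32-bit kernels, where a plain 64-bit load is not atomic.
 *
 *	u_int32_t cnt, prev;
 *	u_int64_t total, snap;
 *
 *	atomic_add_32(&cnt, 1);
 *	prev = atomic_add_32_ov(&cnt, 1);
 *	atomic_get_64(snap, &total);
 */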

#define	CPU_CACHE_SIZE	64

#ifndef IS_P2ALIGNED
#define	IS_P2ALIGNED(v, a) \
	((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#endif /* IS_P2ALIGNED */

#ifndef P2ROUNDUP
#define	P2ROUNDUP(x, align) \
	(-(-((uintptr_t)(x)) & -(align)))
#endif /* P2ROUNDUP */

#ifndef P2ROUNDDOWN
#define	P2ROUNDDOWN(x, align) \
	(((uintptr_t)(x)) & ~((uintptr_t)(align) - 1))
#endif /* P2ROUNDDOWN */
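
/*
 * Worked example (`align' must be a power of 2):
 *
 *	P2ROUNDUP(37, 16)    == 48	(round up to the next multiple)
 *	P2ROUNDDOWN(37, 16)  == 32	(round down)
 *	IS_P2ALIGNED(48, 16) != 0	(48 is 16-byte aligned)
 */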

#define	MCACHE_FREE_PATTERN		0xdeadbeefdeadbeefULL
#define	MCACHE_UNINITIALIZED_PATTERN	0xbaddcafebaddcafeULL

/*
 * mcache allocation request flags.
 *
 * MCR_NOSLEEP and MCR_FAILOK are mutually exclusive.  The latter is used
 * by the mbuf allocator to handle the implementation of several caches that
 * involve multiple layers of mcache.  It implies a best-effort blocking
 * allocation request; if the request cannot be satisfied, the caller will
 * be blocked until further notice, similar to MCR_SLEEP, except that upon
 * a wakeup it will return immediately to the caller regardless of whether
 * the request has been fulfilled.
 *
 * MCR_TRYHARD implies a non-blocking allocation request, regardless of
 * whether MCR_NOSLEEP is set.  It informs the allocator that the request
 * should not cause the calling thread to block, and that it must have
 * exhausted all possible schemes to fulfill the request, including doing
 * reclaims and/or purges, before returning to the caller.
 *
 * Regular mcache clients should only use MCR_SLEEP or MCR_NOSLEEP.
 */
#define	MCR_SLEEP	0x0000		/* same as M_WAITOK */
#define	MCR_NOSLEEP	0x0001		/* same as M_NOWAIT */
#define	MCR_FAILOK	0x0100		/* private, for internal use only */
#define	MCR_TRYHARD	0x0200		/* private, for internal use only */
#define	MCR_USR1	0x1000		/* private, for internal use only */

#define	MCR_NONBLOCKING	(MCR_NOSLEEP | MCR_FAILOK | MCR_TRYHARD)
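
/*
 * Example (illustrative sketch; `foo_cache' is a hypothetical cache
 * created via mcache_create(), declared below):
 *
 *	void *buf;
 *
 *	buf = mcache_alloc(foo_cache, MCR_NOSLEEP);
 *	if (buf == NULL)
 *		buf = mcache_alloc(foo_cache, MCR_SLEEP);
 */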

/*
 * Generic one-way linked list element structure.  This is used to handle
 * mcache_alloc_ext() requests in order to chain the allocated objects
 * together before returning them to the caller.
 */
typedef struct mcache_obj {
	struct mcache_obj	*obj_next;
} mcache_obj_t;
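
/*
 * Example (illustrative sketch): mcache_alloc_ext(), declared below,
 * chains objects through their leading obj_next fields and returns the
 * number actually allocated.  A hypothetical caller would consume the
 * chain like so:
 *
 *	mcache_obj_t *list, *obj;
 *	unsigned int n;
 *
 *	n = mcache_alloc_ext(foo_cache, &list, 16, MCR_SLEEP);
 *	while ((obj = list) != NULL) {
 *		list = obj->obj_next;
 *		obj->obj_next = NULL;
 *		...
 *	}
 */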

typedef struct mcache_bkt {
	void		*bkt_next;	/* next bucket in list */
	void		*bkt_obj[1];	/* one or more objects */
} mcache_bkt_t;

typedef struct mcache_bktlist {
	mcache_bkt_t	*bl_list;	/* bucket list */
	u_int32_t	bl_total;	/* number of buckets */
	u_int32_t	bl_min;		/* min since last update */
	u_int32_t	bl_reaplimit;	/* max reapable buckets */
	u_int64_t	bl_alloc;	/* allocations from this list */
} mcache_bktlist_t;

typedef struct mcache_bkttype {
	int		bt_bktsize;	/* bucket size (number of elements) */
	size_t		bt_minbuf;	/* all smaller buffers qualify */
	size_t		bt_maxbuf;	/* no larger buffers qualify */
	struct mcache	*bt_cache;	/* bucket cache */
} mcache_bkttype_t;

typedef struct mcache_cpu {
	decl_lck_mtx_data(, cc_lock);
	mcache_bkt_t	*cc_filled;	/* the currently filled bucket */
	mcache_bkt_t	*cc_pfilled;	/* the previously filled bucket */
	u_int64_t	cc_alloc;	/* allocations from this cpu */
	u_int64_t	cc_free;	/* frees to this cpu */
	int		cc_objs;	/* number of objects in filled bkt */
	int		cc_pobjs;	/* number of objects in previous bkt */
	int		cc_bktsize;	/* number of elements in a full bkt */
} __attribute__((aligned(CPU_CACHE_SIZE), packed)) mcache_cpu_t;

typedef unsigned int (*mcache_allocfn_t)(void *, mcache_obj_t ***,
    unsigned int, int);
typedef void (*mcache_freefn_t)(void *, mcache_obj_t *, boolean_t);
typedef void (*mcache_auditfn_t)(void *, mcache_obj_t *, boolean_t);
typedef void (*mcache_logfn_t)(u_int32_t, mcache_obj_t *, boolean_t);
typedef void (*mcache_notifyfn_t)(void *, u_int32_t);
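
/*
 * Sketch of a custom slab-layer allocate callback matching
 * mcache_allocfn_t (hypothetical; foo_backend_get() is assumed and not
 * part of this API).  The callback receives the cache's mc_private
 * argument, a pointer used to append objects to the caller's list,
 * the number of objects wanted, and the MCR_* wait flags; it returns
 * the number of objects actually produced.
 *
 *	static unsigned int
 *	foo_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num,
 *	    int wait)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < num; i++) {
 *			mcache_obj_t *o = foo_backend_get(arg, wait);
 *			if (o == NULL)
 *				break;
 *			o->obj_next = NULL;
 *			**plist = o;
 *			*plist = &o->obj_next;
 *		}
 *		return (i);
 *	}
 */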

typedef struct mcache {
	/*
	 * Cache properties
	 */
	LIST_ENTRY(mcache) mc_list;	/* cache linkage */
	char		mc_name[32];	/* cache name */
	struct zone	*mc_slab_zone;	/* backend zone allocator */
	mcache_allocfn_t mc_slab_alloc;	/* slab layer allocate callback */
	mcache_freefn_t	mc_slab_free;	/* slab layer free callback */
	mcache_auditfn_t mc_slab_audit;	/* slab layer audit callback */
	mcache_logfn_t mc_slab_log;	/* slab layer log callback */
	mcache_notifyfn_t mc_slab_notify; /* slab layer notify callback */
	void		*mc_private;	/* opaque arg to callbacks */
	size_t		mc_bufsize;	/* object size */
	size_t		mc_align;	/* object alignment */
	u_int32_t	mc_flags;	/* cache creation flags */
	u_int32_t	mc_purge_cnt;	/* # of purges requested by slab */
	u_int32_t	mc_enable_cnt;	/* # of reenables due to purges */
	u_int32_t	mc_waiter_cnt;	/* # of slab layer waiters */
	u_int32_t	mc_wretry_cnt;	/* # of wait retries */
	u_int32_t	mc_nwretry_cnt;	/* # of no-wait retry attempts */
	u_int32_t	mc_nwfail_cnt;	/* # of no-wait retries that failed */
	decl_lck_mtx_data(, mc_sync_lock); /* protects purges and reenables */
	lck_attr_t	*mc_sync_lock_attr;
	lck_grp_t	*mc_sync_lock_grp;
	lck_grp_attr_t	*mc_sync_lock_grp_attr;
	/*
	 * Keep the CPU and bucket layers' lock statistics separate.
	 */
	lck_attr_t	*mc_cpu_lock_attr;
	lck_grp_t	*mc_cpu_lock_grp;
	lck_grp_attr_t	*mc_cpu_lock_grp_attr;

	/*
	 * Bucket layer common to all CPUs
	 */
	decl_lck_mtx_data(, mc_bkt_lock);
	lck_attr_t	*mc_bkt_lock_attr;
	lck_grp_t	*mc_bkt_lock_grp;
	lck_grp_attr_t  *mc_bkt_lock_grp_attr;
	mcache_bkttype_t *cache_bkttype;	/* bucket type */
	mcache_bktlist_t mc_full;		/* full buckets */
	mcache_bktlist_t mc_empty;		/* empty buckets */
	size_t		mc_chunksize;		/* bufsize + alignment */
	u_int32_t	mc_bkt_contention;	/* lock contention count */
	u_int32_t	mc_bkt_contention_prev;	/* previous snapshot */

	/*
	 * Per-CPU layer, aligned at cache line boundary
	 */
	mcache_cpu_t	mc_cpu[1];
} mcache_t;

#define	MCACHE_ALIGN	8	/* default guaranteed alignment */

/* Valid values for mc_flags */
#define	MCF_VERIFY	0x00000001	/* enable verification */
#define	MCF_TRACE	0x00000002	/* enable transaction auditing */
#define	MCF_NOCPUCACHE	0x00000010	/* disable CPU layer caching */
#define	MCF_NOLEAKLOG	0x00000100	/* disable leak logging */
#define	MCF_EXPLEAKLOG	0x00000200	/* expose leak info to user space */

#define	MCF_DEBUG	(MCF_VERIFY | MCF_TRACE)
#define	MCF_FLAGS_MASK	\
	(MCF_DEBUG | MCF_NOCPUCACHE | MCF_NOLEAKLOG | MCF_EXPLEAKLOG)
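
/*
 * Example (illustrative sketch; `struct foo' is hypothetical): create
 * a cache with verification and transaction auditing enabled.
 *
 *	mcache_t *foo_cache;
 *
 *	foo_cache = mcache_create("foo", sizeof (struct foo),
 *	    MCACHE_ALIGN, MCF_DEBUG, MCR_SLEEP);
 */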

/* Valid values for notify callback */
#define	MCN_RETRYALLOC	0x00000001	/* Allocation should be retried */

#define	MCACHE_STACK_DEPTH 16

typedef struct mcache_audit {
	struct mcache_audit *mca_next;	/* next audit struct */
	void		*mca_addr;	/* address of buffer */
	mcache_t	*mca_cache;	/* parent cache of the buffer */
	struct thread	*mca_thread;	/* thread doing transaction */
	struct thread	*mca_pthread;	/* previous transaction thread */
	size_t		mca_contents_size; /* size of contents */
	void		*mca_contents;	/* contents at last free */
	uint16_t	mca_depth;	/* pc stack depth */
	uint16_t	mca_pdepth;	/* previous transaction pc stack depth */
	void		*mca_stack[MCACHE_STACK_DEPTH];
	void		*mca_pstack[MCACHE_STACK_DEPTH];
	void		*mca_uptr;	/* user-specific pointer */
	uint32_t	mca_uflags;	/* user-specific flags */
} mcache_audit_t;

__private_extern__ int assfail(const char *, const char *, int);
__private_extern__ void mcache_init(void);
__private_extern__ unsigned int mcache_getflags(void);
__private_extern__ mcache_t *mcache_create(const char *, size_t,
    size_t, u_int32_t, int);
__private_extern__ void *mcache_alloc(mcache_t *, int);
__private_extern__ void mcache_free(mcache_t *, void *);
__private_extern__ mcache_t *mcache_create_ext(const char *, size_t,
    mcache_allocfn_t, mcache_freefn_t, mcache_auditfn_t, mcache_logfn_t,
    mcache_notifyfn_t, void *, u_int32_t, int);
__private_extern__ void mcache_destroy(mcache_t *);
__private_extern__ unsigned int mcache_alloc_ext(mcache_t *, mcache_obj_t **,
    unsigned int, int);
__private_extern__ void mcache_free_ext(mcache_t *, mcache_obj_t *);
__private_extern__ void mcache_reap(void);
__private_extern__ boolean_t mcache_purge_cache(mcache_t *);
__private_extern__ void mcache_waiter_inc(mcache_t *);
__private_extern__ void mcache_waiter_dec(mcache_t *);
__private_extern__ boolean_t mcache_bkt_isempty(mcache_t *);
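
/*
 * Typical lifecycle (illustrative sketch; error handling elided and
 * `struct foo' is hypothetical):
 *
 *	mcache_t *mc;
 *	struct foo *p;
 *
 *	mc = mcache_create("foo", sizeof (struct foo), MCACHE_ALIGN,
 *	    0, MCR_SLEEP);
 *	p = mcache_alloc(mc, MCR_SLEEP);
 *	...
 *	mcache_free(mc, p);
 *	mcache_destroy(mc);
 */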

__private_extern__ void mcache_buffer_log(mcache_audit_t *, void *, mcache_t *);
__private_extern__ void mcache_set_pattern(u_int64_t, void *, size_t);
__private_extern__ void *mcache_verify_pattern(u_int64_t, void *, size_t);
__private_extern__ void *mcache_verify_set_pattern(u_int64_t, u_int64_t,
    void *, size_t);
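
/*
 * Example (illustrative sketch; `buf' and `size' are hypothetical and
 * assumed 64-bit aligned).  With MCF_VERIFY, freed buffers are filled
 * with MCACHE_FREE_PATTERN and re-verified at allocation time; a
 * non-NULL return from mcache_verify_pattern() reports a mismatch:
 *
 *	mcache_set_pattern(MCACHE_FREE_PATTERN, buf, size);
 *	...
 *	if (mcache_verify_pattern(MCACHE_FREE_PATTERN, buf, size) != NULL)
 *		panic("%p modified while free", buf);
 */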

__private_extern__ void mcache_audit_free_verify(mcache_audit_t *,
    void *, size_t, size_t);
__private_extern__ void mcache_audit_free_verify_set(mcache_audit_t *,
    void *, size_t, size_t);
__private_extern__ char *mcache_dump_mca(mcache_audit_t *);
__private_extern__ void mcache_audit_panic(mcache_audit_t *, void *, size_t,
    int64_t, int64_t);

__private_extern__ mcache_t *mcache_audit_cache;

#ifdef  __cplusplus
}
#endif

#endif /* KERNEL_PRIVATE */

#endif /* _SYS_MCACHE_H */