/*-
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/_task.h>

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
 * Kegs contain lists of slabs, which are kept on one of three lists (full,
 * empty, or partially allocated) to reduce fragmentation.  They also store
 * the user-supplied item size, which is adjusted for alignment; rsize is
 * the result of that adjustment.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a bitmask.  For item sizes that would yield more than
 * 10% memory waste we potentially allocate a separate uma_slab_t if this will
 * improve the number of items per slab that will fit.
 *
 * The only really gross cases, with regard to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations in this range,
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones, but by far most of the time they serve only
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically serves only one Zone, but in the case
 * of multiple Zones, one of the Zones is considered the Master Zone and all
 * Zone-related stats from the Keg are accounted to the Master Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */
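
/*
 * A minimal consumer-side sketch of the lifecycle described above, using
 * the public interface declared in uma.h (uma_zcreate(), uma_zalloc(),
 * uma_zfree(), uma_zdestroy()).  The "foo" type and its callbacks here are
 * hypothetical placeholders:
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    foo_ctor, foo_dtor, foo_init, foo_fini, UMA_ALIGN_PTR, 0);
 *	...
 *	struct foo *f = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, f);
 *	uma_zdestroy(foo_zone);
 *
 * Allocations are satisfied first from the per-CPU caches, then from the
 * Zone's Bucket cache, and only then by carving items out of the Keg's
 * slabs under the Keg lock.
 */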

/*
 *	This is the representation of a normal (non-OFFPAGE) slab:
 *
 *	i == item
 *	s == slab pointer
 *
 *	<----------------  Page (UMA_SLAB_SIZE) ------------------>
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 *     |___________________________________________________________|
 *
 *
 *	This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i|  |
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_|  |
 *     |___________________________________________________________|
 *       ___________    ^
 *	|slab header|   |
 *	|___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

#define UMA_BOOT_PAGES		64	/* Pages allocated for startup */
#define UMA_BOOT_PAGES_ZONES	32	/* Multiplier for pages to reserve */
					/* if the zone struct exceeds a page */

/* Max waste percentage before going to off page slab management */
#define UMA_MAX_WASTE	10
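
/*
 * A worked example of the threshold (a sketch assuming PAGE_SIZE == 4096):
 * with a 1024-byte item, only 3 items fit alongside an embedded slab
 * header, wasting 1024 bytes (25%) of the page, which exceeds
 * UMA_MAX_WASTE.  An off-page header frees the whole page, so 4 items fit
 * exactly with no waste, making the separate uma_slab_t worth allocating.
 */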

/*
 * This is the initial size of the hash table for uma_slabs that are managed
 * off page.  This hash does expand by powers of two.  Currently it doesn't
 * get smaller.  I doubt there will be many cases where the initial size is
 * exceeded.
 */
#define UMA_HASH_SIZE_INIT	32

/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 */

#define UMA_HASH(h, s) ((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)
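
/*
 * For example, with 4KB pages (UMA_SLAB_SHIFT == 12) and a 32-bucket table
 * (uh_hashmask == 0x1f), a page at 0xdeadb000 hashes to
 * (0xdeadb000 >> 12) & 0x1f == 0xdeadb & 0x1f == 0x1b, i.e. bucket 27.
 */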

#define UMA_HASH_INSERT(h, s, mem)					\
		SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),	\
		    (mem))], (s), us_hlink)
#define UMA_HASH_REMOVE(h, s, mem)					\
		SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),		\
		    (mem))], (s), uma_slab, us_hlink)

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
};

/*
 * align field or structure to cache line
 */
#if defined(__amd64__)
#define UMA_ALIGN	__aligned(CACHE_LINE_SIZE)
#else
#define UMA_ALIGN
#endif

/*
 * Structures for per cpu queues.
 */

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_cnt;				/* Count of free items. */
	int16_t	ub_entries;			/* Max items. */
	void	*ub_bucket[];			/* actual allocation storage */
};
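
/*
 * Buckets are variable-sized: ub_bucket[] is a flexible array member, so a
 * bucket able to hold N items occupies
 * sizeof(struct uma_bucket) + N * sizeof(void *) bytes.  With 8-byte
 * pointers, for example, a 128-entry bucket needs 1024 bytes of item
 * storage plus the header.
 */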

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	uint64_t	uc_allocs;	/* Count of allocations */
	uint64_t	uc_frees;	/* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;
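
/*
 * A simplified sketch of the allocation fast path these caches make
 * possible (the real logic lives in uma_zalloc_arg() in uma_core.c; bucket
 * exchange and the slow path are omitted here).  No lock is taken; a
 * critical section pins us to the CPU that owns the cache:
 *
 *	critical_enter();
 *	cache = &zone->uz_cpu[curcpu];
 *	bucket = cache->uc_allocbucket;
 *	if (bucket != NULL && bucket->ub_cnt > 0) {
 *		item = bucket->ub_bucket[--bucket->ub_cnt];
 *		cache->uc_allocs++;
 *		critical_exit();
 *		return (item);
 *	}
 *	critical_exit();
 *	... otherwise fall back to the Zone's Bucket cache or the Keg ...
 */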

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	struct mtx_padalign	uk_lock;	/* Lock for the keg */
	struct uma_hash	uk_hash;

	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
	LIST_HEAD(,uma_slab)	uk_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	uk_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	uk_full_slab;	/* full slabs */

	uint32_t	uk_align;	/* Alignment mask */
	uint32_t	uk_pages;	/* Total page count */
	uint32_t	uk_free;	/* Count of items free in slabs */
	uint32_t	uk_reserve;	/* Number of reserved items. */
	uint32_t	uk_size;	/* Requested size of each item */
	uint32_t	uk_rsize;	/* Real size of each item */
	uint32_t	uk_maxpages;	/* Maximum number of pages to alloc */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	u_long		uk_offset;	/* Next free offset from base KVA */
	vm_offset_t	uk_kva;		/* Zone base KVA */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	uint16_t	uk_slabsize;	/* Slab size for this keg */
	uint16_t	uk_pgoff;	/* Offset to uma_slab struct */
	uint16_t	uk_ppera;	/* pages per allocation from backend */
	uint16_t	uk_ipers;	/* Items per slab */
	uint32_t	uk_flags;	/* Internal flags */

	/* Least used fields go to the last cache line. */
	const char	*uk_name;		/* Name of creating zone. */
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */
};
typedef struct uma_keg	* uma_keg_t;

/*
 * Free bits per-slab.
 */
#define	SLAB_SETSIZE	(PAGE_SIZE / UMA_SMALLEST_UNIT)
BITSET_DEFINE(slabbits, SLAB_SETSIZE);
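
/*
 * The keg code manipulates us_free with the bitset(9) macros.  A sketch of
 * how a free item is located and claimed within a slab (this mirrors the
 * slab allocation logic in uma_core.c, simplified):
 *
 *	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
 *	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
 *	item = slab->us_data + (keg->uk_rsize * freei);
 *	slab->us_freecount--;
 */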

/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
	uma_keg_t	us_keg;			/* Keg we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long	_us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	uint8_t		*us_data;		/* First item */
	struct slabbits	us_free;		/* Free bitmask. */
#ifdef INVARIANTS
	struct slabbits	us_debugfree;		/* Debug bitmask. */
#endif
	uint16_t	us_freecount;		/* How many are free? */
	uint8_t		us_flags;		/* Page flags see uma.h */
	uint8_t		us_pad;			/* Pad to 32 bits, unused. */
};

#define	us_link	us_type._us_link
#define	us_size	us_type._us_size
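
/*
 * The us_type union reflects two mutually exclusive uses of a slab: slabs
 * owned by a keg sit on one of the keg's three slab lists through us_link,
 * while slabs heading a uma_large_malloc() allocation are never on a keg
 * list and instead record the allocation's size in us_size.
 */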

typedef struct uma_slab * uma_slab_t;
typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);

struct uma_klink {
	LIST_ENTRY(uma_klink)	kl_link;
	uma_keg_t		kl_keg;
};
typedef struct uma_klink *uma_klink_t;

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	struct mtx_padalign	uz_lock;	/* Lock for the zone */
	struct mtx_padalign	*uz_lockptr;
	const char		*uz_name;	/* Text name of the zone */

	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
	LIST_HEAD(,uma_bucket)	uz_buckets;	/* full buckets */

	LIST_HEAD(,uma_klink)	uz_kegs;	/* List of kegs. */
	struct uma_klink	uz_klink;	/* klink for first keg. */

	uma_slaballoc	uz_slab;	/* Allocate a slab from the backend. */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Finalizer for each item. */
	uma_import	uz_import;	/* Import new memory to cache. */
	uma_release	uz_release;	/* Release memory from cache. */
	void		*uz_arg;	/* Import/release argument. */

	uint32_t	uz_flags;	/* Flags inherited from kegs */
	uint32_t	uz_size;	/* Size inherited from kegs */

	volatile u_long	uz_allocs UMA_ALIGN; /* Total number of allocations */
	volatile u_long	uz_fails;	/* Total number of alloc failures */
	volatile u_long	uz_frees;	/* Total number of frees */
	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */
	uint16_t	uz_count;	/* Number of items in a full bucket */
	uint16_t	uz_count_min;	/* Minimum number of items there */

	/* The next two fields are used to print a rate-limited warning. */
	const char	*uz_warning;	/* Warning to print on failure */
	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */

	struct task	uz_maxaction;	/* Task to run when at limit */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1]; /* Per cpu caches */
};

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define	UMA_ZFLAG_MULTI		0x04000000	/* Multiple kegs in the zone. */
#define	UMA_ZFLAG_DRAINING	0x08000000	/* Running zone_drain. */
#define	UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
#define UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage, no PCPU caches. */
#define UMA_ZFLAG_FULL		0x40000000	/* Reached uz_maxpages */
#define UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */

#define	UMA_ZFLAG_INHERIT						\
    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET)
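
/*
 * A sketch of how the inherit mask is meant to be used when a zone is
 * attached to an existing keg (as done during zone construction in
 * uma_core.c): the zone picks up only these flag bits from the keg,
 *
 *	zone->uz_flags |= keg->uk_flags & UMA_ZFLAG_INHERIT;
 */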

static inline uma_keg_t
zone_first_keg(uma_zone_t zone)
{
	uma_klink_t klink;

	klink = LIST_FIRST(&zone->uz_kegs);
	return ((klink != NULL) ? klink->kl_keg : NULL);
}

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
void *uma_large_malloc(vm_size_t size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define	KEG_LOCK_INIT(k, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	KEG_LOCK_FINI(k)	mtx_destroy(&(k)->uk_lock)
#define	KEG_LOCK(k)	mtx_lock(&(k)->uk_lock)
#define	KEG_UNLOCK(k)	mtx_unlock(&(k)->uk_lock)

#define	ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	ZONE_LOCK(z)	mtx_lock((z)->uz_lockptr)
#define	ZONE_TRYLOCK(z)	mtx_trylock((z)->uz_lockptr)
#define	ZONE_UNLOCK(z)	mtx_unlock((z)->uz_lockptr)
#define	ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)
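
/*
 * Note that the ZONE_LOCK() macros go through uz_lockptr, so what they
 * acquire depends on how the zone was set up: for keg-backed zones
 * uz_lockptr points at the backing keg's uk_lock (giving the shared Keg
 * lock described at the top of this file), while cache-only zones point
 * it at their own uz_lock.  A sketch of the two setups:
 *
 *	zone->uz_lockptr = &keg->uk_lock;	(keg-backed zone)
 *	zone->uz_lockptr = &zone->uz_lock;	(cache zone)
 */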

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
	uma_slab_t slab;
	int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((uint8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}
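
/*
 * For example, the free path for a hash-managed keg recovers the slab from
 * an item address by masking down to the page base first (a sketch of the
 * lookup done in uma_core.c):
 *
 *	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
 *	slab = hash_sfind(&keg->uk_hash, mem);
 */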

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	return ((uma_slab_t)p->plinks.s.pv);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->plinks.s.pv = slab;
}
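
/*
 * For kegs that use vtoslab lookup (UMA_ZONE_VTOSLAB rather than an
 * off-page hash), the slab pointer is stashed in the vm_page of each page
 * belonging to the slab when the slab is set up, and recovered on free
 * from the item's page base, e.g. (sketch):
 *
 *	vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
 *	...
 *	slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
 */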

/*
 * The following two functions may be defined by architecture-specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag,
    int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */