/*
 * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/kalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to let the kernel manage dynamic memory quickly.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

#define KALLOC_MAP_SIZE_MIN  (16 * 1024 * 1024)
#define KALLOC_MAP_SIZE_MAX  (128 * 1024 * 1024)
vm_map_t kalloc_map;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;
vm_size_t kalloc_kernmap_size;	/* size of kallocs that can come from kernel map */

unsigned int kalloc_large_inuse;
vm_size_t    kalloc_large_total;
vm_size_t    kalloc_large_max;
vm_size_t    kalloc_largest_allocated = 0;
uint64_t     kalloc_large_sum;

int	kalloc_fake_zone_index = -1; /* index of our fake zone in statistics arrays */

vm_offset_t	kalloc_map_min;
vm_offset_t	kalloc_map_max;
#ifdef	MUTEX_ZONE
/*
 * Diagnostic code to track mutexes separately rather than via the
 * power-of-2 kalloc zones.
 */
	zone_t		lck_mtx_zone;
#endif

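/*
 * Accounting helpers for allocations that bypass zalloc (the "large",
 * kmem_alloc-backed path): debit or credit the calling thread's ledger
 * and, once the fake "kalloc.large" zone has been registered, update the
 * per-task zinfo counters as well.
 */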
static void
KALLOC_ZINFO_SALLOC(vm_size_t bytes)
{
	thread_t thr = current_thread();
	task_t task;
	zinfo_usage_t zinfo;

	ledger_debit(thr->t_ledger, task_ledgers.tkm_shared, bytes);

	if (kalloc_fake_zone_index != -1 &&
	    (task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
		zinfo[kalloc_fake_zone_index].alloc += bytes;
}

static void
KALLOC_ZINFO_SFREE(vm_size_t bytes)
{
	thread_t thr = current_thread();
	task_t task;
	zinfo_usage_t zinfo;

	ledger_credit(thr->t_ledger, task_ledgers.tkm_shared, bytes);

	if (kalloc_fake_zone_index != -1 &&
	    (task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
		zinfo[kalloc_fake_zone_index].free += bytes;
}

/*
 *	All allocations of size less than kalloc_max are rounded up to the
 *	nearest zone size.  This allocator is built on top of the zone
 *	allocator.  A zone is created for each potential size that we are
 *	willing to get in small blocks.
 *
 *	We assume that kalloc_max is not greater than 64K.
 *
 *	Note that kalloc_max is somewhat confusingly named.  It represents
 *	the first power of two for which no zone exists.
 *	kalloc_max_prerounded is the smallest allocation size, before
 *	rounding, for which no zone exists.
 *
 *	Allocations of at least kalloc_kernmap_size are made from the
 *	kernel map rather than from kalloc_map.
 */
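
/*
 * Worked example (assuming the power-of-2 profile below, with a 16-byte
 * minimum): a kalloc(100) request rounds up to the kalloc.128 zone, while
 * a request of kalloc_max_prerounded bytes or more bypasses the zones and
 * is satisfied with kmem_alloc() from kalloc_map (or from kernel_map once
 * it reaches kalloc_kernmap_size).  Callers must pass the same size to
 * kfree() that they passed to kalloc(), e.g.
 *
 *	void *buf = kalloc(100);
 *	if (buf != NULL) {
 *		...
 *		kfree(buf, 100);
 *	}
 */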

#if KALLOC_MINSIZE == 16 && KALLOC_LOG2_MINALIGN == 4

/*
 * "Legacy" aka "power-of-2" backing zones with 16-byte minimum
 * size and alignment.  Users of this profile would probably
 * benefit from some tuning.
 */

#define K_ZONE_SIZES			\
	16,				\
	32,				\
/* 6 */	64,				\
	128,				\
	256,				\
/* 9 */	512,				\
	1024,				\
	2048,				\
/* C */	4096


#define K_ZONE_NAMES			\
	"kalloc.16",			\
	"kalloc.32",			\
/* 6 */	"kalloc.64",			\
	"kalloc.128",			\
	"kalloc.256",			\
/* 9 */	"kalloc.512",			\
	"kalloc.1024",			\
	"kalloc.2048",			\
/* C */	"kalloc.4096"

#define K_ZONE_MAXIMA			\
	1024,				\
	4096,				\
/* 6 */	4096,				\
	4096,				\
	4096,				\
/* 9 */	1024,				\
	1024,				\
	1024,				\
/* C */	1024

#elif KALLOC_MINSIZE == 8 && KALLOC_LOG2_MINALIGN == 3

/*
 * Tweaked for ARM (and x64) in 04/2011
 */

#define K_ZONE_SIZES			\
/* 3 */	8,				\
	16,	24,			\
	32,	40,	48,		\
/* 6 */	64,	88,	112,		\
	128,	192,			\
	256,	384,			\
/* 9 */	512,	768,			\
	1024,	1536,			\
	2048,	3072,			\
	4096,	6144

#define K_ZONE_NAMES			\
/* 3 */	"kalloc.8",			\
	"kalloc.16",	"kalloc.24",	\
	"kalloc.32",	"kalloc.40",	"kalloc.48",	\
/* 6 */	"kalloc.64",	"kalloc.88",	"kalloc.112",	\
	"kalloc.128",	"kalloc.192",	\
	"kalloc.256",	"kalloc.384",	\
/* 9 */	"kalloc.512",	"kalloc.768",	\
	"kalloc.1024",	"kalloc.1536",	\
	"kalloc.2048",	"kalloc.3072",	\
	"kalloc.4096",	"kalloc.6144"

#define	K_ZONE_MAXIMA			\
/* 3 */	1024,				\
	1024,	1024,			\
	4096,	4096,	4096,		\
/* 6 */	4096,	4096,	4096,		\
	4096,	4096,			\
	4096,	4096,			\
/* 9 */	1024,	1024,			\
	1024,	1024,			\
	1024,	1024,			\
/* C */	1024,	64

#else
#error	missing zone size parameters for kalloc
#endif

#define KALLOC_MINALIGN (1 << KALLOC_LOG2_MINALIGN)

static const int k_zone_size[] = {
	K_ZONE_SIZES,
	8192,
	16384,
/* F */	32768
};

#define N_K_ZONE	(sizeof (k_zone_size) / sizeof (k_zone_size[0]))

/*
 * Many kalloc() allocations are for small structures containing a few
 * pointers and longs - the k_zone_dlut[] direct lookup table, indexed by
 * size normalized to the minimum alignment, finds the right zone index
 * for them in one dereference.
 */

#define INDEX_ZDLUT(size)	\
			(((size) + KALLOC_MINALIGN - 1) / KALLOC_MINALIGN)
#define N_K_ZDLUT	(2048 / KALLOC_MINALIGN)
				/* covers sizes [0 .. 2048 - KALLOC_MINALIGN] */
#define MAX_SIZE_ZDLUT	((N_K_ZDLUT - 1) * KALLOC_MINALIGN)
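
/*
 * Illustrative lookup (assuming the 8-byte-minimum profile, so
 * KALLOC_MINALIGN == 8): a request of 24 bytes gives
 * INDEX_ZDLUT(24) == (24 + 7) / 8 == 3, and k_zone_dlut[3] holds the
 * index of the kalloc.24 zone, so the zone is found with a single table
 * dereference instead of a linear search of k_zone_size[].
 */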

static int8_t k_zone_dlut[N_K_ZDLUT];	/* table of indices into k_zone[] */

/*
 * If there's no hit in the DLUT, then start searching from k_zindex_start.
 */
static int k_zindex_start;

static zone_t k_zone[N_K_ZONE];

static const char *k_zone_name[N_K_ZONE] = {
	K_ZONE_NAMES,
	"kalloc.8192",
	"kalloc.16384",
/* F */	"kalloc.32768"
};

/*
 *  Max number of elements per zone.  zinit rounds things up correctly.
 *  Doing things this way permits each zone to have a different maximum size
 *  based on need, rather than just guessing; it also means it's patchable
 *  in case you're wrong!
 */
unsigned int k_zone_max[N_K_ZONE] = {
	K_ZONE_MAXIMA,
	4096,
	64,
/* F */	64
};

/* #define KALLOC_DEBUG		1 */

/* forward declarations */
void * kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock);


lck_grp_t *kalloc_lck_grp;
lck_mtx_t kalloc_lock;

#define kalloc_spin_lock()	lck_mtx_lock_spin(&kalloc_lock)
#define kalloc_unlock()		lck_mtx_unlock(&kalloc_lock)


/* OSMalloc local data declarations */
static
queue_head_t    OSMalloc_tag_list;

lck_grp_t *OSMalloc_tag_lck_grp;
lck_mtx_t OSMalloc_tag_lock;

#define OSMalloc_tag_spin_lock()	lck_mtx_lock_spin(&OSMalloc_tag_lock)
#define OSMalloc_tag_unlock()		lck_mtx_unlock(&OSMalloc_tag_lock)


/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag	tag);
void OSMalloc_Tagrele(OSMallocTag	tag);

/*
 *	Initialize the memory allocator.  This should be called only
 *	once on a system-wide basis (i.e., the first processor to get
 *	here does the initialization).
 *
 *	This initializes all of the zones.
 */

void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size, kalloc_map_size;
	register int i;

	/*
	 * Scale the kalloc_map_size to physical memory size: stay below
	 * 1/8th the total zone map size, or 128 MB (for a 32-bit kernel).
	 */
	kalloc_map_size = (vm_size_t)(sane_size >> 5);
#if !__LP64__
	if (kalloc_map_size > KALLOC_MAP_SIZE_MAX)
		kalloc_map_size = KALLOC_MAP_SIZE_MAX;
#endif /* !__LP64__ */
	if (kalloc_map_size < KALLOC_MAP_SIZE_MIN)
		kalloc_map_size = KALLOC_MAP_SIZE_MIN;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT,
			       &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	kalloc_map_min = min;
	kalloc_map_max = min + kalloc_map_size - 1;

	/*
	 *	Ensure that zones up to size 8192 bytes exist.
	 *	This is desirable because messages are allocated
	 *	with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;
	/* size it to be more than 16 times kalloc_max (256k) for allocations from kernel map */
	kalloc_kernmap_size = (kalloc_max * 16) + 1;
	kalloc_largest_allocated = kalloc_kernmap_size;

	/*
	 *	Allocate a zone for each size we are going to handle.
	 *	We specify non-paged memory.  Don't charge the caller
	 *	for the allocation, as we aren't sure how the memory
	 *	will be handled.
	 */
	for (i = 0; (size = k_zone_size[i]) < kalloc_max; i++) {
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
		zone_change(k_zone[i], Z_CALLERACCT, FALSE);
	}

	/*
	 * Build the Direct LookUp Table for small allocations
	 */
	for (i = 0, size = 0; i <= N_K_ZDLUT; i++, size += KALLOC_MINALIGN) {
		int zindex = 0;

		while ((vm_size_t)k_zone_size[zindex] < size)
			zindex++;

		if (i == N_K_ZDLUT) {
			k_zindex_start = zindex;
			break;
		}
		k_zone_dlut[i] = (int8_t)zindex;
	}

#ifdef KALLOC_DEBUG
	printf("kalloc_init: k_zindex_start %d\n", k_zindex_start);

	/*
	 * Do a quick synthesis to see how well/badly we can
	 * find-a-zone for a given size.
	 * Useful when debugging/tweaking the array of zone sizes.
	 * Cache misses probably more critical than compare-branches!
	 */
	for (i = 0; i < (int)N_K_ZONE; i++) {
		vm_size_t testsize = (vm_size_t)k_zone_size[i] - 1;
		int compare = 0;
		int zindex;

		if (testsize < MAX_SIZE_ZDLUT) {
			compare += 1;	/* 'if' (T) */

			long dindex = INDEX_ZDLUT(testsize);
			zindex = (int)k_zone_dlut[dindex];

		} else if (testsize < kalloc_max_prerounded) {

			compare += 2;	/* 'if' (F), 'if' (T) */

			zindex = k_zindex_start;
			while ((vm_size_t)k_zone_size[zindex] < testsize) {
				zindex++;
				compare++;	/* 'while' (T) */
			}
			compare++;	/* 'while' (F) */
		} else
			break;	/* not zone-backed */

		zone_t z = k_zone[zindex];
		printf("kalloc_init: req size %4lu: %11s took %d compare%s\n",
		    (unsigned long)testsize, z->zone_name, compare,
		    compare == 1 ? "" : "s");
	}
#endif
	kalloc_lck_grp = lck_grp_alloc_init("kalloc.large", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&kalloc_lock, kalloc_lck_grp, LCK_ATTR_NULL);
	OSMalloc_init();
#ifdef	MUTEX_ZONE
	lck_mtx_zone = zinit(sizeof(struct _lck_mtx_), 1024*256, 4096, "lck_mtx");
#endif
}

/*
 * Given an allocation size, return the kalloc zone it belongs to.
 * Direct LookUp Table variant.
 */
static __inline zone_t
get_zone_dlut(vm_size_t size)
{
	long dindex = INDEX_ZDLUT(size);
	int zindex = (int)k_zone_dlut[dindex];
	return (k_zone[zindex]);
}

/* As above, but linear search k_zone_size[] for the next zone that fits. */

static __inline zone_t
get_zone_search(vm_size_t size, int zindex)
{
	assert(size < kalloc_max_prerounded);

	while ((vm_size_t)k_zone_size[zindex] < size)
		zindex++;

	assert((unsigned)zindex < N_K_ZONE &&
	    (vm_size_t)k_zone_size[zindex] < kalloc_max);

	return (k_zone[zindex]);
}

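/*
 * Common allocation path.  Zone-backed sizes are handed to zalloc;
 * anything at or above kalloc_max_prerounded is backed by kmem_alloc
 * from kalloc_map (or kernel_map for very large requests).  If canblock
 * is FALSE and the request cannot be satisfied from a zone, NULL is
 * returned rather than risking a blocking kmem_alloc.
 */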
void *
kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock)
{
	zone_t z;

	if (size < MAX_SIZE_ZDLUT)
		z = get_zone_dlut(size);
	else if (size < kalloc_max_prerounded)
		z = get_zone_search(size, k_zindex_start);
	else {
		/*
		 * If size is too large for a zone, then use kmem_alloc.
		 * (We use kmem_alloc instead of kmem_alloc_kobject so that
		 * krealloc can use kmem_realloc.)
		 */
		vm_map_t alloc_map;
		void *addr;

		/* kmem_alloc could block so we return if noblock */
		if (!canblock) {
			return(NULL);
		}

		if (size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;

		if (kmem_alloc(alloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS) {
			if (alloc_map != kernel_map) {
				if (kmem_alloc(kernel_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
					addr = NULL;
			}
			else
				addr = NULL;
		}

		if (addr != NULL) {
			kalloc_spin_lock();
			/*
			 * Thread-safe version of the workaround for 4740071
			 * (a double FREE())
			 */
			if (size > kalloc_largest_allocated)
				kalloc_largest_allocated = size;

			kalloc_large_inuse++;
			kalloc_large_total += size;
			kalloc_large_sum += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;

			kalloc_unlock();

			KALLOC_ZINFO_SALLOC(size);
		}
		return(addr);
	}
#ifdef KALLOC_DEBUG
	if (size > z->elem_size)
		panic("%s: z %p (%s) but requested size %lu", __func__,
		    z, z->zone_name, (unsigned long)size);
#endif
	assert(size <= z->elem_size);
	return (zalloc_canblock(z, canblock));
}

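/* Convenience wrappers: kalloc() may block; kalloc_noblock() never does. */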
void *
kalloc(
       vm_size_t size)
{
	return( kalloc_canblock(size, TRUE) );
}

void *
kalloc_noblock(
	       vm_size_t size)
{
	return( kalloc_canblock(size, FALSE) );
}

volatile SInt32 kfree_nop_count = 0;

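/*
 * Free a block previously obtained from kalloc(); the caller supplies the
 * original allocation size.  Zone-backed sizes are returned to their zone;
 * larger blocks are returned to kalloc_map or kernel_map, except that a
 * size larger than anything ever allocated is treated as a suspected
 * double free and silently ignored (counted in kfree_nop_count).
 */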
void
kfree(
	void		*data,
	vm_size_t	size)
{
	zone_t z;

	if (size < MAX_SIZE_ZDLUT)
		z = get_zone_dlut(size);
	else if (size < kalloc_max_prerounded)
		z = get_zone_search(size, k_zindex_start);
	else {
		/* if size was too large for a zone, then use kmem_free */

		vm_map_t alloc_map = kernel_map;

		if ((((vm_offset_t) data) >= kalloc_map_min) && (((vm_offset_t) data) <= kalloc_map_max))
			alloc_map = kalloc_map;
		if (size > kalloc_largest_allocated) {
			/*
			 * Work around double FREEs of small MALLOCs.
			 *
			 * This used to end up being a no-op, because a
			 * pointer freed from a zalloc-backed allocation could
			 * never show up in the kalloc_map.  The kernel_map is
			 * a different story: once the block was released back
			 * into the zalloc pool, a pointer would get written
			 * over the 'size' that MALLOC keeps in the first 4
			 * bytes of the underlying allocation.  On the second
			 * FREE that pointer looks like a huge size and pushes
			 * the kfree into the kernel_map, where we end up
			 * removing a large amount of virtual space before we
			 * panic.  This check makes us ignore a kfree whose
			 * size must be bogus.  Note that the size might be
			 * wrong for reasons other than the scenario above,
			 * but freeing it would still be wrong and cause
			 * serious damage.
			 */

			OSAddAtomic(1, &kfree_nop_count);
			return;
		}
		kmem_free(alloc_map, (vm_offset_t)data, size);

		kalloc_spin_lock();

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		kalloc_unlock();

		KALLOC_ZINFO_SFREE(size);
		return;
	}

	/* free to the appropriate zone */
#ifdef KALLOC_DEBUG
	if (size > z->elem_size)
		panic("%s: z %p (%s) but requested size %lu", __func__,
		    z, z->zone_name, (unsigned long)size);
#endif
	assert(size <= z->elem_size);
	zfree(z, data);
}

#ifdef MACH_BSD
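/*
 * Return the zone that backs allocations of the given size, or ZONE_NULL
 * if the size is large enough to be handled by kmem_alloc instead.
 */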
zone_t
kalloc_zone(
	vm_size_t       size)
{
	if (size < MAX_SIZE_ZDLUT)
		return (get_zone_dlut(size));
	if (size <= kalloc_max)
		return (get_zone_search(size, k_zindex_start));
	return (ZONE_NULL);
}
#endif

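/*
 * Large kmem_alloc-backed kallocs are reported through a fake "kalloc.large"
 * zone.  kalloc_fake_zone_init() records the statistics-array index assigned
 * to that fake zone, and kalloc_fake_zone_info() fills in its statistics for
 * zone reporting tools.
 */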
void
kalloc_fake_zone_init(int zone_index)
{
	kalloc_fake_zone_index = zone_index;
}

void
kalloc_fake_zone_info(int *count,
		      vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size,
		      uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct)
{
	*count      = kalloc_large_inuse;
	*cur_size   = kalloc_large_total;
	*max_size   = kalloc_large_max;

	if (kalloc_large_inuse) {
		*elem_size  = kalloc_large_total / kalloc_large_inuse;
		*alloc_size = kalloc_large_total / kalloc_large_inuse;
	} else {
		*elem_size  = 0;
		*alloc_size = 0;
	}
	*sum_size   = kalloc_large_sum;
	*collectable = 0;
	*exhaustable = 0;
	*caller_acct = 0;
}


void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);

	OSMalloc_tag_lck_grp = lck_grp_alloc_init("OSMalloc_tag", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&OSMalloc_tag_lock, OSMalloc_tag_lck_grp, LCK_ATTR_NULL);
}

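/*
 * Allocate and register an OSMalloc tag.  The tag is created with a
 * reference count of one and queued on OSMalloc_tag_list; OSMT_PAGEABLE in
 * flags marks allocations made with the tag as pageable.
 *
 * A rough usage sketch (the tag name is illustrative; OSMT_DEFAULT is the
 * "no attributes" flag from libkern/OSMalloc.h):
 *
 *	OSMallocTag tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);
 *	void *buf = OSMalloc(1024, tag);
 *	if (buf != NULL) {
 *		...
 *		OSFree(buf, 1024, tag);
 *	}
 *	OSMalloc_Tagfree(tag);
 */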
OSMallocTag
OSMalloc_Tagalloc(
	const char			*str,
	uint32_t			flags)
{
	OSMallocTag       OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	OSMalloc_tag_spin_lock();
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	OSMalloc_tag_unlock();
	OSMTag->OSMT_state = OSMT_VALID;
	return(OSMTag);
}

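/*
 * Tag reference counting: OSMalloc_Tagref() takes a reference on behalf of
 * an outstanding allocation, and OSMalloc_Tagrele() drops one, freeing the
 * tag once it has been released and the last reference is gone.
 */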
void
OSMalloc_Tagref(
	 OSMallocTag		tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state);

	(void)hw_atomic_add(&tag->OSMT_refcnt, 1);
}

void
OSMalloc_Tagrele(
	 OSMallocTag		tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state);

	if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
			OSMalloc_tag_spin_lock();
			(void)remque((queue_entry_t)tag);
			OSMalloc_tag_unlock();
			kfree((void*)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele():'%s' has refcnt 0\n", tag->OSMT_name);
	}
}

void
OSMalloc_Tagfree(
	 OSMallocTag		tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree():'%s' has bad state 0x%08X \n", tag->OSMT_name, tag->OSMT_state);

	if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
		OSMalloc_tag_spin_lock();
		(void)remque((queue_entry_t)tag);
		OSMalloc_tag_unlock();
		kfree((void*)tag, sizeof(*tag));
	}
}

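/*
 * Allocate memory charged to the given tag.  Pageable tags asking for at
 * least a page are satisfied with pageable kernel_map memory; everything
 * else goes through kalloc().  The tag reference taken here is dropped
 * again if the allocation fails.
 */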
void *
OSMalloc(
	uint32_t			size,
	OSMallocTag			tag)
{
	void			*addr=NULL;
	kern_return_t	kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {

		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			addr = NULL;
	} else
		addr = kalloc((vm_size_t)size);

	if (!addr)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_nowait(
	uint32_t			size,
	OSMallocTag			tag)
{
	void	*addr=NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_noblock(
	uint32_t			size,
	OSMallocTag			tag)
{
	void	*addr=NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

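/*
 * Free memory obtained from OSMalloc() and drop the tag reference that the
 * allocation was holding.  The caller supplies the original size so the
 * right release path (kmem_free vs. kfree) can be chosen.
 */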
void
OSFree(
	void				*addr,
	uint32_t			size,
	OSMallocTag			tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void *)addr, size);

	OSMalloc_Tagrele(tag);
}
