1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26/*
27 * Big Theory Statement for the virtual memory allocator.
28 *
29 * For a more complete description of the main ideas, see:
30 *
31 *	Jeff Bonwick and Jonathan Adams,
32 *
33 *	Magazines and vmem: Extending the Slab Allocator to Many CPUs and
34 *	Arbitrary Resources.
35 *
36 *	Proceedings of the 2001 Usenix Conference.
37 *	Available at http://www.usenix.org/event/usenix01/bonwick.html
38 *
39 *
40 * 1. General Concepts
41 * -------------------
42 *
43 * 1.1 Overview
44 * ------------
45 * We divide the kernel address space into a number of logically distinct
46 * pieces, or *arenas*: text, data, heap, stack, and so on.  Within these
47 * arenas we often subdivide further; for example, we use heap addresses
48 * not only for the kernel heap (kmem_alloc() space), but also for DVMA,
49 * bp_mapin(), /dev/kmem, and even some device mappings like the TOD chip.
50 * The kernel address space, therefore, is most accurately described as
51 * a tree of arenas in which each node of the tree *imports* some subset
52 * of its parent.  The virtual memory allocator manages these arenas and
53 * supports their natural hierarchical structure.
54 *
55 * 1.2 Arenas
56 * ----------
57 * An arena is nothing more than a set of integers.  These integers most
58 * commonly represent virtual addresses, but in fact they can represent
59 * anything at all.  For example, we could use an arena containing the
60 * integers minpid through maxpid to allocate process IDs.  vmem_create()
61 * and vmem_destroy() create and destroy vmem arenas.  In order to
62 * differentiate between arenas used for addresses and arenas used for
63 * identifiers, the VMC_IDENTIFIER flag is passed to vmem_create().  This
64 * prevents identifier exhaustion from being diagnosed as general memory
65 * failure.
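 *
 * As a hedged sketch (the arena name and bounds below are illustrative,
 * not taken from this file), an identifier arena for process IDs could be
 * created and used like this:
 *
 *	pid_arena = vmem_create("pid", (void *)(uintptr_t)minpid,
 *	    maxpid - minpid + 1, 1, NULL, NULL, NULL, 0,
 *	    VM_SLEEP | VMC_IDENTIFIER);
 *
 *	pid = (pid_t)(uintptr_t)vmem_alloc(pid_arena, 1, VM_SLEEP);
 *	...
 *	vmem_free(pid_arena, (void *)(uintptr_t)pid, 1);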
66 *
67 * 1.3 Spans
68 * ---------
69 * We represent the integers in an arena as a collection of *spans*, or
70 * contiguous ranges of integers.  For example, the kernel heap consists
71 * of just one span: [kernelheap, ekernelheap).  Spans can be added to an
72 * arena in two ways: explicitly, by vmem_add(), or implicitly, by
73 * importing, as described in Section 1.5 below.
74 *
75 * 1.4 Segments
76 * ------------
77 * Spans are subdivided into *segments*, each of which is either allocated
78 * or free.  A segment, like a span, is a contiguous range of integers.
79 * Each allocated segment [addr, addr + size) represents exactly one
80 * vmem_alloc(size) that returned addr.  Free segments represent the space
81 * between allocated segments.  If two free segments are adjacent, we
82 * coalesce them into one larger segment; that is, if segments [a, b) and
83 * [b, c) are both free, we merge them into a single segment [a, c).
84 * The segments within a span are linked together in increasing-address order
85 * so we can easily determine whether coalescing is possible.
86 *
87 * Segments never cross span boundaries.  When all segments within
88 * an imported span become free, we return the span to its source.
89 *
90 * 1.5 Imported Memory
91 * -------------------
92 * As mentioned in the overview, some arenas are logical subsets of
93 * other arenas.  For example, kmem_va_arena (a virtual address cache
94 * that satisfies most kmem_slab_create() requests) is just a subset
95 * of heap_arena (the kernel heap) that provides caching for the most
96 * common slab sizes.  When kmem_va_arena runs out of virtual memory,
97 * it *imports* more from the heap; we say that heap_arena is the
98 * *vmem source* for kmem_va_arena.  vmem_create() allows you to
99 * specify any existing vmem arena as the source for your new arena.
100 * Topologically, since every arena is a child of at most one source,
101 * the set of all arenas forms a collection of trees.
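 *
 * A concrete instance appears in vmem_init() at the bottom of this file,
 * where the metadata arena imports from the kernel heap:
 *
 *	vmem_metadata_arena = vmem_create("vmem_metadata",
 *	    NULL, 0, heap_quantum,
 *	    vmem_alloc, vmem_free, heap, 8 * heap_quantum,
 *	    VM_SLEEP | VMC_POPULATOR | VMC_NO_QCACHE);
 *
 * Here 'heap' is the vmem source, and vmem_alloc()/vmem_free() are the
 * import and release functions invoked on it.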
102 *
103 * 1.6 Constrained Allocations
104 * ---------------------------
105 * Some vmem clients are quite picky about the kind of address they want.
106 * For example, the DVMA code may need an address that is at a particular
107 * phase with respect to some alignment (to get good cache coloring), or
108 * that lies within certain limits (the addressable range of a device),
109 * or that doesn't cross some boundary (a DMA counter restriction) --
110 * or all of the above.  vmem_xalloc() allows the client to specify any
111 * or all of these constraints.
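 *
 * As a hedged sketch (the arena and the constraint values are illustrative
 * and assume a PAGESIZE quantum): to allocate 64K at offset 8K from a 64K
 * alignment boundary, entirely below 16MB, without crossing a 1MB boundary:
 *
 *	addr = vmem_xalloc(dvma_arena, 64 * 1024, 64 * 1024, 8 * 1024,
 *	    1024 * 1024, NULL, (void *)(16 * 1024 * 1024), VM_SLEEP);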
112 *
113 * 1.7 The Vmem Quantum
114 * --------------------
115 * Every arena has a notion of 'quantum', specified at vmem_create() time,
116 * that defines the arena's minimum unit of currency.  Most commonly the
117 * quantum is either 1 or PAGESIZE, but any power of 2 is legal.
118 * All vmem allocations are guaranteed to be quantum-aligned.
119 *
120 * 1.8 Quantum Caching
121 * -------------------
122 * A vmem arena may be so hot (frequently used) that the scalability of vmem
123 * allocation is a significant concern.  We address this by allowing the most
124 * common allocation sizes to be serviced by the kernel memory allocator,
125 * which provides low-latency per-cpu caching.  The qcache_max argument to
126 * vmem_create() specifies the largest allocation size to cache.
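 *
 * The fast path is visible at the top of vmem_alloc() later in this file:
 * requests no larger than vm_qcache_max are handed straight to the
 * matching kmem cache:
 *
 *	if (size - 1 < vmp->vm_qcache_max)
 *		return (kmem_cache_alloc(vmp->vm_qcache[(size - 1) >>
 *		    vmp->vm_qshift], vmflag & VM_KMFLAGS));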
127 *
128 * 1.9 Relationship to Kernel Memory Allocator
129 * -------------------------------------------
130 * Every kmem cache has a vmem arena as its slab supplier.  The kernel memory
131 * allocator uses vmem_alloc() and vmem_free() to create and destroy slabs.
132 *
133 *
134 * 2. Implementation
135 * -----------------
136 *
137 * 2.1 Segment lists and markers
138 * -----------------------------
139 * The segment structure (vmem_seg_t) contains two doubly-linked lists.
140 *
141 * The arena list (vs_anext/vs_aprev) links all segments in the arena.
142 * In addition to the allocated and free segments, the arena contains
143 * special marker segments at span boundaries.  Span markers simplify
144 * coalescing and importing logic by making it easy to tell both when
145 * we're at a span boundary (so we don't coalesce across it), and when
146 * a span is completely free (its neighbors will both be span markers).
147 *
148 * Imported spans will have vs_import set.
149 *
150 * The next-of-kin list (vs_knext/vs_kprev) links segments of the same type:
151 * (1) for allocated segments, vs_knext is the hash chain linkage;
152 * (2) for free segments, vs_knext is the freelist linkage;
153 * (3) for span marker segments, vs_knext is the next span marker.
154 *
155 * 2.2 Allocation hashing
156 * ----------------------
157 * We maintain a hash table of all allocated segments, hashed by address.
158 * This allows vmem_free() to discover the target segment in constant time.
159 * vmem_update() periodically resizes hash tables to keep hash chains short.
160 *
161 * 2.3 Freelist management
162 * -----------------------
163 * We maintain power-of-2 freelists for free segments, i.e. free segments
164 * of size >= 2^n reside in vmp->vm_freelist[n].  To ensure constant-time
165 * allocation, vmem_xalloc() looks not in the first freelist that *might*
166 * satisfy the allocation, but in the first freelist that *definitely*
167 * satisfies the allocation (unless VM_BESTFIT is specified, or all larger
168 * freelists are empty).  For example, a 1000-byte allocation will be
169 * satisfied not from the 512..1023-byte freelist, whose members *might*
170 * contain a 1000-byte segment, but from a 1024-byte or larger freelist,
171 * the first member of which will *definitely* satisfy the allocation.
172 * This ensures that vmem_xalloc() works in constant time.
173 *
174 * We maintain a bit map to determine quickly which freelists are non-empty.
175 * vmp->vm_freemap & (1 << n) is non-zero iff vmp->vm_freelist[n] is non-empty.
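 *
 * The instant-fit lookup in vmem_alloc() reduces to two bit operations
 * on this map (excerpted from the code later in this file):
 *
 *	if ((size & (size - 1)) == 0)
 *		flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
 *	else if ((hb = highbit(size)) < VMEM_FREELISTS)
 *		flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));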
176 *
177 * The different freelists are linked together into one large freelist,
178 * with the freelist heads serving as markers.  Freelist markers simplify
179 * the maintenance of vm_freemap by making it easy to tell when we're taking
180 * the last member of a freelist (both of its neighbors will be markers).
181 *
182 * 2.4 Vmem Locking
183 * ----------------
184 * For simplicity, all arena state is protected by a per-arena lock.
185 * For very hot arenas, use quantum caching for scalability.
186 *
187 * 2.5 Vmem Population
188 * -------------------
189 * Any internal vmem routine that might need to allocate new segment
190 * structures must prepare in advance by calling vmem_populate(), which
191 * will preallocate enough vmem_seg_t's to get it through the entire
192 * operation without dropping the arena lock.
193 *
194 * 2.6 Auditing
195 * ------------
196 * If KMF_AUDIT is set in kmem_flags, we audit vmem allocations as well.
197 * Since virtual addresses cannot be scribbled on, there is no equivalent
198 * in vmem to redzone checking, deadbeef, or other kmem debugging features.
199 * Moreover, we do not audit frees because segment coalescing destroys the
200 * association between an address and its segment structure.  Auditing is
201 * thus intended primarily to keep track of who's consuming the arena.
202 * Debugging support could certainly be extended in the future if it proves
203 * necessary, but we do so much live checking via the allocation hash table
204 * that even non-DEBUG systems get quite a bit of sanity checking already.
205 */
206
207#include <sys/vmem_impl.h>
208#include <sys/kmem.h>
209#include <sys/kstat.h>
210#include <sys/param.h>
211#include <sys/systm.h>
212#include <sys/atomic.h>
213#include <sys/bitmap.h>
214#include <sys/sysmacros.h>
215#include <sys/cmn_err.h>
216#include <sys/debug.h>
217#include <sys/panic.h>
218
219#define	VMEM_INITIAL		10	/* early vmem arenas */
220#define	VMEM_SEG_INITIAL	200	/* early segments */
221
222/*
223 * Adding a new span to an arena requires two segment structures: one to
224 * represent the span, and one to represent the free segment it contains.
225 */
226#define	VMEM_SEGS_PER_SPAN_CREATE	2
227
228/*
229 * Allocating a piece of an existing segment requires 0-2 segment structures
230 * depending on how much of the segment we're allocating.
231 *
232 * To allocate the entire segment, no new segment structures are needed; we
233 * simply move the existing segment structure from the freelist to the
234 * allocation hash table.
235 *
236 * To allocate a piece from the left or right end of the segment, we must
237 * split the segment into two pieces (allocated part and remainder), so we
238 * need one new segment structure to represent the remainder.
239 *
240 * To allocate from the middle of a segment, we need two new segment structures
241 * to represent the remainders on either side of the allocated part.
242 */
243#define	VMEM_SEGS_PER_EXACT_ALLOC	0
244#define	VMEM_SEGS_PER_LEFT_ALLOC	1
245#define	VMEM_SEGS_PER_RIGHT_ALLOC	1
246#define	VMEM_SEGS_PER_MIDDLE_ALLOC	2
247
248/*
249 * vmem_populate() preallocates segment structures for vmem to do its work.
250 * It must preallocate enough for the worst case, which is when we must import
251 * a new span and then allocate from the middle of it.
252 */
253#define	VMEM_SEGS_PER_ALLOC_MAX		\
254	(VMEM_SEGS_PER_SPAN_CREATE + VMEM_SEGS_PER_MIDDLE_ALLOC)
255
256/*
257 * The segment structures themselves are allocated from vmem_seg_arena, so
258 * we have a recursion problem when vmem_seg_arena needs to populate itself.
259 * We address this by working out the maximum number of segment structures
260 * this act will require, and multiplying by the maximum number of threads
261 * that we'll allow to do it simultaneously.
262 *
263 * The worst-case segment consumption to populate vmem_seg_arena is as
264 * follows (depicted as a stack trace to indicate why events are occurring):
265 *
266 * (In order to lower the fragmentation in the heap_arena, we specify a
267 * minimum import size for the vmem_metadata_arena which is the same size
268 * as the kmem_va quantum cache allocations.  This causes the worst-case
269 * allocation from the vmem_metadata_arena to be 3 segments.)
270 *
271 * vmem_alloc(vmem_seg_arena)		-> 2 segs (span create + exact alloc)
272 *  segkmem_alloc(vmem_metadata_arena)
273 *   vmem_alloc(vmem_metadata_arena)	-> 3 segs (span create + left alloc)
274 *    vmem_alloc(heap_arena)		-> 1 seg (left alloc)
275 *   page_create()
276 *   hat_memload()
277 *    kmem_cache_alloc()
278 *     kmem_slab_create()
279 *	vmem_alloc(hat_memload_arena)	-> 2 segs (span create + exact alloc)
280 *	 segkmem_alloc(heap_arena)
281 *	  vmem_alloc(heap_arena)	-> 1 seg (left alloc)
282 *	  page_create()
283 *	  hat_memload()		-> (hat layer won't recurse further)
284 *
285 * The worst-case consumption for each arena is 3 segment structures.
286 * Of course, a 3-seg reserve could easily be blown by multiple threads.
287 * Therefore, we serialize all allocations from vmem_seg_arena (which is OK
288 * because they're rare).  We cannot allow a non-blocking allocation to get
289 * tied up behind a blocking allocation, however, so we use separate locks
290 * for VM_SLEEP and VM_NOSLEEP allocations.  Similarly, VM_PUSHPAGE allocations
291 * must not block behind ordinary VM_SLEEPs.  In addition, if the system is
292 * panicking then we must keep enough resources for panic_thread to do its
293 * work.  Thus we have at most four threads trying to allocate from
294 * vmem_seg_arena, and each thread consumes at most three segment structures,
295 * so we must maintain a 12-seg reserve.
296 */
297#define	VMEM_POPULATE_RESERVE	12
298
299/*
300 * vmem_populate() ensures that each arena has VMEM_MINFREE seg structures
301 * so that it can satisfy the worst-case allocation *and* participate in
302 * worst-case allocation from vmem_seg_arena.
303 */
304#define	VMEM_MINFREE	(VMEM_POPULATE_RESERVE + VMEM_SEGS_PER_ALLOC_MAX)
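
/*
 * With the constants above, VMEM_MINFREE works out to 12 + (2 + 2) = 16
 * segment structures per arena.
 */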
305
306static vmem_t vmem0[VMEM_INITIAL];
307static vmem_t *vmem_populator[VMEM_INITIAL];
308static uint32_t vmem_id;
309static uint32_t vmem_populators;
310static vmem_seg_t vmem_seg0[VMEM_SEG_INITIAL];
311static vmem_seg_t *vmem_segfree;
312static kmutex_t vmem_list_lock;
313static kmutex_t vmem_segfree_lock;
314static kmutex_t vmem_sleep_lock;
315static kmutex_t vmem_nosleep_lock;
316static kmutex_t vmem_pushpage_lock;
317static kmutex_t vmem_panic_lock;
318static vmem_t *vmem_list;
319static vmem_t *vmem_metadata_arena;
320static vmem_t *vmem_seg_arena;
321static vmem_t *vmem_hash_arena;
322static vmem_t *vmem_vmem_arena;
323static long vmem_update_interval = 15;	/* vmem_update() every 15 seconds */
324uint32_t vmem_mtbf;		/* mean time between failures [default: off] */
325size_t vmem_seg_size = sizeof (vmem_seg_t);
326
327static vmem_kstat_t vmem_kstat_template = {
328	{ "mem_inuse",		KSTAT_DATA_UINT64 },
329	{ "mem_import",		KSTAT_DATA_UINT64 },
330	{ "mem_total",		KSTAT_DATA_UINT64 },
331	{ "vmem_source",	KSTAT_DATA_UINT32 },
332	{ "alloc",		KSTAT_DATA_UINT64 },
333	{ "free",		KSTAT_DATA_UINT64 },
334	{ "wait",		KSTAT_DATA_UINT64 },
335	{ "fail",		KSTAT_DATA_UINT64 },
336	{ "lookup",		KSTAT_DATA_UINT64 },
337	{ "search",		KSTAT_DATA_UINT64 },
338	{ "populate_wait",	KSTAT_DATA_UINT64 },
339	{ "populate_fail",	KSTAT_DATA_UINT64 },
340	{ "contains",		KSTAT_DATA_UINT64 },
341	{ "contains_search",	KSTAT_DATA_UINT64 },
342};
343
344/*
345 * Insert/delete from arena list (type 'a') or next-of-kin list (type 'k').
346 */
347#define	VMEM_INSERT(vprev, vsp, type)					\
348{									\
349	vmem_seg_t *vnext = (vprev)->vs_##type##next;			\
350	(vsp)->vs_##type##next = (vnext);				\
351	(vsp)->vs_##type##prev = (vprev);				\
352	(vprev)->vs_##type##next = (vsp);				\
353	(vnext)->vs_##type##prev = (vsp);				\
354}
355
356#define	VMEM_DELETE(vsp, type)						\
357{									\
358	vmem_seg_t *vprev = (vsp)->vs_##type##prev;			\
359	vmem_seg_t *vnext = (vsp)->vs_##type##next;			\
360	(vprev)->vs_##type##next = (vnext);				\
361	(vnext)->vs_##type##prev = (vprev);				\
362}
363
364/*
365 * Get a vmem_seg_t from the global segfree list.
366 */
367static vmem_seg_t *
368vmem_getseg_global(void)
369{
370	vmem_seg_t *vsp;
371
372	mutex_enter(&vmem_segfree_lock);
373	if ((vsp = vmem_segfree) != NULL)
374		vmem_segfree = vsp->vs_knext;
375	mutex_exit(&vmem_segfree_lock);
376
377	return (vsp);
378}
379
380/*
381 * Put a vmem_seg_t on the global segfree list.
382 */
383static void
384vmem_putseg_global(vmem_seg_t *vsp)
385{
386	mutex_enter(&vmem_segfree_lock);
387	vsp->vs_knext = vmem_segfree;
388	vmem_segfree = vsp;
389	mutex_exit(&vmem_segfree_lock);
390}
391
392/*
393 * Get a vmem_seg_t from vmp's segfree list.
394 */
395static vmem_seg_t *
396vmem_getseg(vmem_t *vmp)
397{
398	vmem_seg_t *vsp;
399
400	ASSERT(vmp->vm_nsegfree > 0);
401
402	vsp = vmp->vm_segfree;
403	vmp->vm_segfree = vsp->vs_knext;
404	vmp->vm_nsegfree--;
405
406	return (vsp);
407}
408
409/*
410 * Put a vmem_seg_t on vmp's segfree list.
411 */
412static void
413vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
414{
415	vsp->vs_knext = vmp->vm_segfree;
416	vmp->vm_segfree = vsp;
417	vmp->vm_nsegfree++;
418}
419
420/*
421 * Add vsp to the appropriate freelist.
422 */
423static void
424vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
425{
426	vmem_seg_t *vprev;
427
428	ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
429
430	vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
431	vsp->vs_type = VMEM_FREE;
432	vmp->vm_freemap |= VS_SIZE(vprev);
433	VMEM_INSERT(vprev, vsp, k);
434
435	cv_broadcast(&vmp->vm_cv);
436}
437
438/*
439 * Take vsp from the freelist.
440 */
441static void
442vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
443{
444	ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
445	ASSERT(vsp->vs_type == VMEM_FREE);
446
447	if (vsp->vs_knext->vs_start == 0 && vsp->vs_kprev->vs_start == 0) {
448		/*
449		 * The segments on both sides of 'vsp' are freelist heads,
450		 * so taking vsp leaves the freelist at vsp->vs_kprev empty.
451		 */
452		ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
453		vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
454	}
455	VMEM_DELETE(vsp, k);
456}
457
458/*
459 * Add vsp to the allocated-segment hash table and update kstats.
460 */
461static void
462vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
463{
464	vmem_seg_t **bucket;
465
466	vsp->vs_type = VMEM_ALLOC;
467	bucket = VMEM_HASH(vmp, vsp->vs_start);
468	vsp->vs_knext = *bucket;
469	*bucket = vsp;
470
471	if (vmem_seg_size == sizeof (vmem_seg_t)) {
472		vsp->vs_depth = (uint8_t)getpcstack(vsp->vs_stack,
473		    VMEM_STACK_DEPTH);
474		vsp->vs_thread = curthread;
475		vsp->vs_timestamp = gethrtime();
476	} else {
477		vsp->vs_depth = 0;
478	}
479
480	vmp->vm_kstat.vk_alloc.value.ui64++;
481	vmp->vm_kstat.vk_mem_inuse.value.ui64 += VS_SIZE(vsp);
482}
483
484/*
485 * Remove vsp from the allocated-segment hash table and update kstats.
486 */
487static vmem_seg_t *
488vmem_hash_delete(vmem_t *vmp, uintptr_t addr, size_t size)
489{
490	vmem_seg_t *vsp, **prev_vspp;
491
492	prev_vspp = VMEM_HASH(vmp, addr);
493	while ((vsp = *prev_vspp) != NULL) {
494		if (vsp->vs_start == addr) {
495			*prev_vspp = vsp->vs_knext;
496			break;
497		}
498		vmp->vm_kstat.vk_lookup.value.ui64++;
499		prev_vspp = &vsp->vs_knext;
500	}
501
502	if (vsp == NULL)
503		panic("vmem_hash_delete(%p, %lx, %lu): bad free",
504		    (void *)vmp, addr, size);
505	if (VS_SIZE(vsp) != size)
506		panic("vmem_hash_delete(%p, %lx, %lu): wrong size (expect %lu)",
507		    (void *)vmp, addr, size, VS_SIZE(vsp));
508
509	vmp->vm_kstat.vk_free.value.ui64++;
510	vmp->vm_kstat.vk_mem_inuse.value.ui64 -= size;
511
512	return (vsp);
513}
514
515/*
516 * Create a segment spanning the range [start, end) and add it to the arena.
517 */
518static vmem_seg_t *
519vmem_seg_create(vmem_t *vmp, vmem_seg_t *vprev, uintptr_t start, uintptr_t end)
520{
521	vmem_seg_t *newseg = vmem_getseg(vmp);
522
523	newseg->vs_start = start;
524	newseg->vs_end = end;
525	newseg->vs_type = 0;
526	newseg->vs_import = 0;
527
528	VMEM_INSERT(vprev, newseg, a);
529
530	return (newseg);
531}
532
533/*
534 * Remove segment vsp from the arena.
535 */
536static void
537vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
538{
539	ASSERT(vsp->vs_type != VMEM_ROTOR);
540	VMEM_DELETE(vsp, a);
541
542	vmem_putseg(vmp, vsp);
543}
544
545/*
546 * Add the span [vaddr, vaddr + size) to vmp and update kstats.
547 */
548static vmem_seg_t *
549vmem_span_create(vmem_t *vmp, void *vaddr, size_t size, uint8_t import)
550{
551	vmem_seg_t *newseg, *span;
552	uintptr_t start = (uintptr_t)vaddr;
553	uintptr_t end = start + size;
554
555	ASSERT(MUTEX_HELD(&vmp->vm_lock));
556
557	if ((start | end) & (vmp->vm_quantum - 1))
558		panic("vmem_span_create(%p, %p, %lu): misaligned",
559		    (void *)vmp, vaddr, size);
560
561	span = vmem_seg_create(vmp, vmp->vm_seg0.vs_aprev, start, end);
562	span->vs_type = VMEM_SPAN;
563	span->vs_import = import;
564	VMEM_INSERT(vmp->vm_seg0.vs_kprev, span, k);
565
566	newseg = vmem_seg_create(vmp, span, start, end);
567	vmem_freelist_insert(vmp, newseg);
568
569	if (import)
570		vmp->vm_kstat.vk_mem_import.value.ui64 += size;
571	vmp->vm_kstat.vk_mem_total.value.ui64 += size;
572
573	return (newseg);
574}
575
576/*
577 * Remove span vsp from vmp and update kstats.
578 */
579static void
580vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
581{
582	vmem_seg_t *span = vsp->vs_aprev;
583	size_t size = VS_SIZE(vsp);
584
585	ASSERT(MUTEX_HELD(&vmp->vm_lock));
586	ASSERT(span->vs_type == VMEM_SPAN);
587
588	if (span->vs_import)
589		vmp->vm_kstat.vk_mem_import.value.ui64 -= size;
590	vmp->vm_kstat.vk_mem_total.value.ui64 -= size;
591
592	VMEM_DELETE(span, k);
593
594	vmem_seg_destroy(vmp, vsp);
595	vmem_seg_destroy(vmp, span);
596}
597
598/*
599 * Allocate the subrange [addr, addr + size) from segment vsp.
600 * If there are leftovers on either side, place them on the freelist.
601 * Returns a pointer to the segment representing [addr, addr + size).
602 */
603static vmem_seg_t *
604vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
605{
606	uintptr_t vs_start = vsp->vs_start;
607	uintptr_t vs_end = vsp->vs_end;
608	size_t vs_size = vs_end - vs_start;
609	size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
610	uintptr_t addr_end = addr + realsize;
611
612	ASSERT(P2PHASE(vs_start, vmp->vm_quantum) == 0);
613	ASSERT(P2PHASE(addr, vmp->vm_quantum) == 0);
614	ASSERT(vsp->vs_type == VMEM_FREE);
615	ASSERT(addr >= vs_start && addr_end - 1 <= vs_end - 1);
616	ASSERT(addr - 1 <= addr_end - 1);
617
618	/*
619	 * If we're allocating from the start of the segment, and the
620	 * remainder will be on the same freelist, we can save quite
621	 * a bit of work.
622	 */
623	if (P2SAMEHIGHBIT(vs_size, vs_size - realsize) && addr == vs_start) {
624		ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
625		vsp->vs_start = addr_end;
626		vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
627		vmem_hash_insert(vmp, vsp);
628		return (vsp);
629	}
630
631	vmem_freelist_delete(vmp, vsp);
632
633	if (vs_end != addr_end)
634		vmem_freelist_insert(vmp,
635		    vmem_seg_create(vmp, vsp, addr_end, vs_end));
636
637	if (vs_start != addr)
638		vmem_freelist_insert(vmp,
639		    vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));
640
641	vsp->vs_start = addr;
642	vsp->vs_end = addr + size;
643
644	vmem_hash_insert(vmp, vsp);
645	return (vsp);
646}
647
648/*
649 * Returns 1 if we are populating, 0 otherwise.
650 * This is used to prevent recursion from the HAT layer.
651 */
652int
653vmem_is_populator()
654{
655	return (mutex_owner(&vmem_sleep_lock) == curthread ||
656	    mutex_owner(&vmem_nosleep_lock) == curthread ||
657	    mutex_owner(&vmem_pushpage_lock) == curthread ||
658	    mutex_owner(&vmem_panic_lock) == curthread);
659}
660
661/*
662 * Populate vmp's segfree list with VMEM_MINFREE vmem_seg_t structures.
663 */
664static int
665vmem_populate(vmem_t *vmp, int vmflag)
666{
667	char *p;
668	vmem_seg_t *vsp;
669	ssize_t nseg;
670	size_t size;
671	kmutex_t *lp;
672	int i;
673
674	while (vmp->vm_nsegfree < VMEM_MINFREE &&
675	    (vsp = vmem_getseg_global()) != NULL)
676		vmem_putseg(vmp, vsp);
677
678	if (vmp->vm_nsegfree >= VMEM_MINFREE)
679		return (1);
680
681	/*
682	 * If we're already populating, tap the reserve.
683	 */
684	if (vmem_is_populator()) {
685		ASSERT(vmp->vm_cflags & VMC_POPULATOR);
686		return (1);
687	}
688
689	mutex_exit(&vmp->vm_lock);
690
691	if (panic_thread == curthread)
692		lp = &vmem_panic_lock;
693	else if (vmflag & VM_NOSLEEP)
694		lp = &vmem_nosleep_lock;
695	else if (vmflag & VM_PUSHPAGE)
696		lp = &vmem_pushpage_lock;
697	else
698		lp = &vmem_sleep_lock;
699
700	mutex_enter(lp);
701
702	nseg = VMEM_MINFREE + vmem_populators * VMEM_POPULATE_RESERVE;
703	size = P2ROUNDUP(nseg * vmem_seg_size, vmem_seg_arena->vm_quantum);
704	nseg = size / vmem_seg_size;
705
706	/*
707	 * The following vmem_alloc() may need to populate vmem_seg_arena
708	 * and all the things it imports from.  When doing so, it will tap
709	 * each arena's reserve to prevent recursion (see the block comment
710	 * above the definition of VMEM_POPULATE_RESERVE).
711	 */
712	p = vmem_alloc(vmem_seg_arena, size, vmflag & VM_KMFLAGS);
713	if (p == NULL) {
714		mutex_exit(lp);
715		mutex_enter(&vmp->vm_lock);
716		vmp->vm_kstat.vk_populate_fail.value.ui64++;
717		return (0);
718	}
719
720	/*
721	 * Restock the arenas that may have been depleted during population.
722	 */
723	for (i = 0; i < vmem_populators; i++) {
724		mutex_enter(&vmem_populator[i]->vm_lock);
725		while (vmem_populator[i]->vm_nsegfree < VMEM_POPULATE_RESERVE)
726			vmem_putseg(vmem_populator[i],
727			    (vmem_seg_t *)(p + --nseg * vmem_seg_size));
728		mutex_exit(&vmem_populator[i]->vm_lock);
729	}
730
731	mutex_exit(lp);
732	mutex_enter(&vmp->vm_lock);
733
734	/*
735	 * Now take our own segments.
736	 */
737	ASSERT(nseg >= VMEM_MINFREE);
738	while (vmp->vm_nsegfree < VMEM_MINFREE)
739		vmem_putseg(vmp, (vmem_seg_t *)(p + --nseg * vmem_seg_size));
740
741	/*
742	 * Give the remainder to charity.
743	 */
744	while (nseg > 0)
745		vmem_putseg_global((vmem_seg_t *)(p + --nseg * vmem_seg_size));
746
747	return (1);
748}
749
750/*
751 * Advance a walker from its previous position to 'afterme'.
752 * Note: may drop and reacquire vmp->vm_lock.
753 */
754static void
755vmem_advance(vmem_t *vmp, vmem_seg_t *walker, vmem_seg_t *afterme)
756{
757	vmem_seg_t *vprev = walker->vs_aprev;
758	vmem_seg_t *vnext = walker->vs_anext;
759	vmem_seg_t *vsp = NULL;
760
761	VMEM_DELETE(walker, a);
762
763	if (afterme != NULL)
764		VMEM_INSERT(afterme, walker, a);
765
766	/*
767	 * The walker segment's presence may have prevented its neighbors
768	 * from coalescing.  If so, coalesce them now.
769	 */
770	if (vprev->vs_type == VMEM_FREE) {
771		if (vnext->vs_type == VMEM_FREE) {
772			ASSERT(vprev->vs_end == vnext->vs_start);
773			vmem_freelist_delete(vmp, vnext);
774			vmem_freelist_delete(vmp, vprev);
775			vprev->vs_end = vnext->vs_end;
776			vmem_freelist_insert(vmp, vprev);
777			vmem_seg_destroy(vmp, vnext);
778		}
779		vsp = vprev;
780	} else if (vnext->vs_type == VMEM_FREE) {
781		vsp = vnext;
782	}
783
784	/*
785	 * vsp could represent a complete imported span,
786	 * in which case we must return it to the source.
787	 */
788	if (vsp != NULL && vsp->vs_aprev->vs_import &&
789	    vmp->vm_source_free != NULL &&
790	    vsp->vs_aprev->vs_type == VMEM_SPAN &&
791	    vsp->vs_anext->vs_type == VMEM_SPAN) {
792		void *vaddr = (void *)vsp->vs_start;
793		size_t size = VS_SIZE(vsp);
794		ASSERT(size == VS_SIZE(vsp->vs_aprev));
795		vmem_freelist_delete(vmp, vsp);
796		vmem_span_destroy(vmp, vsp);
797		mutex_exit(&vmp->vm_lock);
798		vmp->vm_source_free(vmp->vm_source, vaddr, size);
799		mutex_enter(&vmp->vm_lock);
800	}
801}
802
803/*
804 * VM_NEXTFIT allocations deliberately cycle through all virtual addresses
805 * in an arena, so that we avoid reusing addresses for as long as possible.
806 * This helps to catch use-after-free bugs.  It's also the perfect policy
807 * for allocating things like process IDs, where we want to cycle through
808 * all values in order.
809 */
810static void *
811vmem_nextfit_alloc(vmem_t *vmp, size_t size, int vmflag)
812{
813	vmem_seg_t *vsp, *rotor;
814	uintptr_t addr;
815	size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
816	size_t vs_size;
817
818	mutex_enter(&vmp->vm_lock);
819
820	if (vmp->vm_nsegfree < VMEM_MINFREE && !vmem_populate(vmp, vmflag)) {
821		mutex_exit(&vmp->vm_lock);
822		return (NULL);
823	}
824
825	/*
826	 * The common case is that the segment right after the rotor is free,
827	 * and large enough that extracting 'size' bytes won't change which
828	 * freelist it's on.  In this case we can avoid a *lot* of work.
829	 * Instead of the normal vmem_seg_alloc(), we just advance the start
830	 * address of the victim segment.  Instead of moving the rotor, we
831	 * create the new segment structure *behind the rotor*, which has
832	 * the same effect.  And finally, we know we don't have to coalesce
833	 * the rotor's neighbors because the new segment lies between them.
834	 */
835	rotor = &vmp->vm_rotor;
836	vsp = rotor->vs_anext;
837	if (vsp->vs_type == VMEM_FREE && (vs_size = VS_SIZE(vsp)) > realsize &&
838	    P2SAMEHIGHBIT(vs_size, vs_size - realsize)) {
839		ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
840		addr = vsp->vs_start;
841		vsp->vs_start = addr + realsize;
842		vmem_hash_insert(vmp,
843		    vmem_seg_create(vmp, rotor->vs_aprev, addr, addr + size));
844		mutex_exit(&vmp->vm_lock);
845		return ((void *)addr);
846	}
847
848	/*
849	 * Starting at the rotor, look for a segment large enough to
850	 * satisfy the allocation.
851	 */
852	for (;;) {
853		vmp->vm_kstat.vk_search.value.ui64++;
854		if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
855			break;
856		vsp = vsp->vs_anext;
857		if (vsp == rotor) {
858			/*
859			 * We've come full circle.  One possibility is that
860			 * there's actually enough space, but the rotor itself
861			 * is preventing the allocation from succeeding because
862			 * it's sitting between two free segments.  Therefore,
863			 * we advance the rotor and see if that liberates a
864			 * suitable segment.
865			 */
866			vmem_advance(vmp, rotor, rotor->vs_anext);
867			vsp = rotor->vs_aprev;
868			if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
869				break;
870			/*
871			 * If there's a lower arena we can import from, or it's
872			 * a VM_NOSLEEP allocation, let vmem_xalloc() handle it.
873			 * Otherwise, wait until another thread frees something.
874			 */
875			if (vmp->vm_source_alloc != NULL ||
876			    (vmflag & VM_NOSLEEP)) {
877				mutex_exit(&vmp->vm_lock);
878				return (vmem_xalloc(vmp, size, vmp->vm_quantum,
879				    0, 0, NULL, NULL, vmflag & VM_KMFLAGS));
880			}
881			vmp->vm_kstat.vk_wait.value.ui64++;
882			cv_wait(&vmp->vm_cv, &vmp->vm_lock);
883			vsp = rotor->vs_anext;
884		}
885	}
886
887	/*
888	 * We found a segment.  Extract enough space to satisfy the allocation.
889	 */
890	addr = vsp->vs_start;
891	vsp = vmem_seg_alloc(vmp, vsp, addr, size);
892	ASSERT(vsp->vs_type == VMEM_ALLOC &&
893	    vsp->vs_start == addr && vsp->vs_end == addr + size);
894
895	/*
896	 * Advance the rotor to right after the newly-allocated segment.
897	 * That's where the next VM_NEXTFIT allocation will begin searching.
898	 */
899	vmem_advance(vmp, rotor, vsp);
900	mutex_exit(&vmp->vm_lock);
901	return ((void *)addr);
902}
903
904/*
905 * Checks if vmp is guaranteed to have a size-byte buffer somewhere on its
906 * freelist.  If size is not a power of 2, it can return a false negative.
907 *
908 * Used to decide if a newly imported span is superfluous after re-acquiring
909 * the arena lock.
910 */
911static int
912vmem_canalloc(vmem_t *vmp, size_t size)
913{
914	int hb;
915	int flist = 0;
916	ASSERT(MUTEX_HELD(&vmp->vm_lock));
917
918	if ((size & (size - 1)) == 0)
919		flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
920	else if ((hb = highbit(size)) < VMEM_FREELISTS)
921		flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
922
923	return (flist);
924}
925
926/*
927 * Allocate size bytes at offset phase from an align boundary such that the
928 * resulting segment [addr, addr + size) is a subset of [minaddr, maxaddr)
929 * that does not straddle a nocross-aligned boundary.
930 */
931void *
932vmem_xalloc(vmem_t *vmp, size_t size, size_t align_arg, size_t phase,
933	size_t nocross, void *minaddr, void *maxaddr, int vmflag)
934{
935	vmem_seg_t *vsp;
936	vmem_seg_t *vbest = NULL;
937	uintptr_t addr, taddr, start, end;
938	uintptr_t align = (align_arg != 0) ? align_arg : vmp->vm_quantum;
939	void *vaddr, *xvaddr = NULL;
940	size_t xsize;
941	int hb, flist, resv;
942	uint32_t mtbf;
943
944	if ((align | phase | nocross) & (vmp->vm_quantum - 1))
945		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
946		    "parameters not vm_quantum aligned",
947		    (void *)vmp, size, align_arg, phase, nocross,
948		    minaddr, maxaddr, vmflag);
949
950	if (nocross != 0 &&
951	    (align > nocross || P2ROUNDUP(phase + size, align) > nocross))
952		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
953		    "overconstrained allocation",
954		    (void *)vmp, size, align_arg, phase, nocross,
955		    minaddr, maxaddr, vmflag);
956
957	if (phase >= align || (align & (align - 1)) != 0 ||
958	    (nocross & (nocross - 1)) != 0)
959		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
960		    "parameters inconsistent or invalid",
961		    (void *)vmp, size, align_arg, phase, nocross,
962		    minaddr, maxaddr, vmflag);
963
964	if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
965	    (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
966		return (NULL);
967
968	mutex_enter(&vmp->vm_lock);
969	for (;;) {
970		if (vmp->vm_nsegfree < VMEM_MINFREE &&
971		    !vmem_populate(vmp, vmflag))
972			break;
973do_alloc:
974		/*
975		 * highbit() returns the highest bit + 1, which is exactly
976		 * what we want: we want to search the first freelist whose
977		 * members are *definitely* large enough to satisfy our
978		 * allocation.  However, there are certain cases in which we
979		 * want to look at the next-smallest freelist (which *might*
980		 * be able to satisfy the allocation):
981		 *
982		 * (1)	The size is exactly a power of 2, in which case
983		 *	the smaller freelist is always big enough;
984		 *
985		 * (2)	All other freelists are empty;
986		 *
987		 * (3)	We're in the highest possible freelist, which is
988		 *	always empty (e.g. the 4GB freelist on 32-bit systems);
989		 *
990		 * (4)	We're doing a best-fit or first-fit allocation.
991		 */
992		if ((size & (size - 1)) == 0) {
993			flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
994		} else {
995			hb = highbit(size);
996			if ((vmp->vm_freemap >> hb) == 0 ||
997			    hb == VMEM_FREELISTS ||
998			    (vmflag & (VM_BESTFIT | VM_FIRSTFIT)))
999				hb--;
1000			flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
1001		}
1002
1003		for (vbest = NULL, vsp = (flist == 0) ? NULL :
1004		    vmp->vm_freelist[flist - 1].vs_knext;
1005		    vsp != NULL; vsp = vsp->vs_knext) {
1006			vmp->vm_kstat.vk_search.value.ui64++;
1007			if (vsp->vs_start == 0) {
1008				/*
1009				 * We're moving up to a larger freelist,
1010				 * so if we've already found a candidate,
1011				 * the fit can't possibly get any better.
1012				 */
1013				if (vbest != NULL)
1014					break;
1015				/*
1016				 * Find the next non-empty freelist.
1017				 */
1018				flist = lowbit(P2ALIGN(vmp->vm_freemap,
1019				    VS_SIZE(vsp)));
1020				if (flist-- == 0)
1021					break;
1022				vsp = (vmem_seg_t *)&vmp->vm_freelist[flist];
1023				ASSERT(vsp->vs_knext->vs_type == VMEM_FREE);
1024				continue;
1025			}
1026			if (vsp->vs_end - 1 < (uintptr_t)minaddr)
1027				continue;
1028			if (vsp->vs_start > (uintptr_t)maxaddr - 1)
1029				continue;
1030			start = MAX(vsp->vs_start, (uintptr_t)minaddr);
1031			end = MIN(vsp->vs_end - 1, (uintptr_t)maxaddr - 1) + 1;
1032			taddr = P2PHASEUP(start, align, phase);
1033			if (P2BOUNDARY(taddr, size, nocross))
1034				taddr +=
1035				    P2ROUNDUP(P2NPHASE(taddr, nocross), align);
1036			if ((taddr - start) + size > end - start ||
1037			    (vbest != NULL && VS_SIZE(vsp) >= VS_SIZE(vbest)))
1038				continue;
1039			vbest = vsp;
1040			addr = taddr;
1041			if (!(vmflag & VM_BESTFIT) || VS_SIZE(vbest) == size)
1042				break;
1043		}
1044		if (vbest != NULL)
1045			break;
1046		ASSERT(xvaddr == NULL);
1047		if (size == 0)
1048			panic("vmem_xalloc(): size == 0");
1049		if (vmp->vm_source_alloc != NULL && nocross == 0 &&
1050		    minaddr == NULL && maxaddr == NULL) {
1051			size_t aneeded, asize;
1052			size_t aquantum = MAX(vmp->vm_quantum,
1053			    vmp->vm_source->vm_quantum);
1054			size_t aphase = phase;
1055			if ((align > aquantum) &&
1056			    !(vmp->vm_cflags & VMC_XALIGN)) {
1057				aphase = (P2PHASE(phase, aquantum) != 0) ?
1058				    align - vmp->vm_quantum : align - aquantum;
1059				ASSERT(aphase >= phase);
1060			}
1061			aneeded = MAX(size + aphase, vmp->vm_min_import);
1062			asize = P2ROUNDUP(aneeded, aquantum);
1063
1064			/*
1065			 * Determine how many segment structures we'll consume.
1066			 * The calculation must be precise because if we're
1067			 * here on behalf of vmem_populate(), we are taking
1068			 * segments from a very limited reserve.
1069			 */
1070			if (size == asize && !(vmp->vm_cflags & VMC_XALLOC))
1071				resv = VMEM_SEGS_PER_SPAN_CREATE +
1072				    VMEM_SEGS_PER_EXACT_ALLOC;
1073			else if (phase == 0 &&
1074			    align <= vmp->vm_source->vm_quantum)
1075				resv = VMEM_SEGS_PER_SPAN_CREATE +
1076				    VMEM_SEGS_PER_LEFT_ALLOC;
1077			else
1078				resv = VMEM_SEGS_PER_ALLOC_MAX;
1079
1080			ASSERT(vmp->vm_nsegfree >= resv);
1081			vmp->vm_nsegfree -= resv;	/* reserve our segs */
1082			mutex_exit(&vmp->vm_lock);
1083			if (vmp->vm_cflags & VMC_XALLOC) {
1084				size_t oasize = asize;
1085				vaddr = ((vmem_ximport_t *)
1086				    vmp->vm_source_alloc)(vmp->vm_source,
1087				    &asize, align, vmflag & VM_KMFLAGS);
1088				ASSERT(asize >= oasize);
1089				ASSERT(P2PHASE(asize,
1090				    vmp->vm_source->vm_quantum) == 0);
1091				ASSERT(!(vmp->vm_cflags & VMC_XALIGN) ||
1092				    IS_P2ALIGNED(vaddr, align));
1093			} else {
1094				vaddr = vmp->vm_source_alloc(vmp->vm_source,
1095				    asize, vmflag & VM_KMFLAGS);
1096			}
1097			mutex_enter(&vmp->vm_lock);
1098			vmp->vm_nsegfree += resv;	/* claim reservation */
1099			aneeded = size + align - vmp->vm_quantum;
1100			aneeded = P2ROUNDUP(aneeded, vmp->vm_quantum);
1101			if (vaddr != NULL) {
1102				/*
1103				 * Since we dropped the vmem lock while
1104				 * calling the import function, other
1105				 * threads could have imported space
1106				 * and made our import unnecessary.  In
1107				 * order to save space, we return
1108				 * excess imports immediately.
1109				 */
1110				if (asize > aneeded &&
1111				    vmp->vm_source_free != NULL &&
1112				    vmem_canalloc(vmp, aneeded)) {
1113					ASSERT(resv >=
1114					    VMEM_SEGS_PER_MIDDLE_ALLOC);
1115					xvaddr = vaddr;
1116					xsize = asize;
1117					goto do_alloc;
1118				}
1119				vbest = vmem_span_create(vmp, vaddr, asize, 1);
1120				addr = P2PHASEUP(vbest->vs_start, align, phase);
1121				break;
1122			} else if (vmem_canalloc(vmp, aneeded)) {
1123				/*
1124				 * Our import failed, but another thread
1125				 * added sufficient free memory to the arena
1126				 * to satisfy our request.  Go back and
1127				 * grab it.
1128				 */
1129				ASSERT(resv >= VMEM_SEGS_PER_MIDDLE_ALLOC);
1130				goto do_alloc;
1131			}
1132		}
1133
1134		/*
1135		 * If the requestor chooses to fail the allocation attempt
1136		 * rather than reap, wait, and retry, get out of the loop.
1137		 */
1138		if (vmflag & VM_ABORT)
1139			break;
1140		mutex_exit(&vmp->vm_lock);
1141		if (vmp->vm_cflags & VMC_IDENTIFIER)
1142			kmem_reap_idspace();
1143		else
1144			kmem_reap();
1145		mutex_enter(&vmp->vm_lock);
1146		if (vmflag & VM_NOSLEEP)
1147			break;
1148		vmp->vm_kstat.vk_wait.value.ui64++;
1149		cv_wait(&vmp->vm_cv, &vmp->vm_lock);
1150	}
1151	if (vbest != NULL) {
1152		ASSERT(vbest->vs_type == VMEM_FREE);
1153		ASSERT(vbest->vs_knext != vbest);
1154		/* re-position to end of buffer */
1155		if (vmflag & VM_ENDALLOC) {
1156			addr += ((vbest->vs_end - (addr + size)) / align) *
1157			    align;
1158		}
1159		(void) vmem_seg_alloc(vmp, vbest, addr, size);
1160		mutex_exit(&vmp->vm_lock);
1161		if (xvaddr)
1162			vmp->vm_source_free(vmp->vm_source, xvaddr, xsize);
1163		ASSERT(P2PHASE(addr, align) == phase);
1164		ASSERT(!P2BOUNDARY(addr, size, nocross));
1165		ASSERT(addr >= (uintptr_t)minaddr);
1166		ASSERT(addr + size - 1 <= (uintptr_t)maxaddr - 1);
1167		return ((void *)addr);
1168	}
1169	vmp->vm_kstat.vk_fail.value.ui64++;
1170	mutex_exit(&vmp->vm_lock);
1171	if (vmflag & VM_PANIC)
1172		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
1173		    "cannot satisfy mandatory allocation",
1174		    (void *)vmp, size, align_arg, phase, nocross,
1175		    minaddr, maxaddr, vmflag);
1176	ASSERT(xvaddr == NULL);
1177	return (NULL);
1178}
1179
1180/*
1181 * Free the segment [vaddr, vaddr + size), where vaddr was a constrained
1182 * allocation.  vmem_xalloc() and vmem_xfree() must always be paired because
1183 * both routines bypass the quantum caches.
1184 */
1185void
1186vmem_xfree(vmem_t *vmp, void *vaddr, size_t size)
1187{
1188	vmem_seg_t *vsp, *vnext, *vprev;
1189
1190	mutex_enter(&vmp->vm_lock);
1191
1192	vsp = vmem_hash_delete(vmp, (uintptr_t)vaddr, size);
1193	vsp->vs_end = P2ROUNDUP(vsp->vs_end, vmp->vm_quantum);
1194
1195	/*
1196	 * Attempt to coalesce with the next segment.
1197	 */
1198	vnext = vsp->vs_anext;
1199	if (vnext->vs_type == VMEM_FREE) {
1200		ASSERT(vsp->vs_end == vnext->vs_start);
1201		vmem_freelist_delete(vmp, vnext);
1202		vsp->vs_end = vnext->vs_end;
1203		vmem_seg_destroy(vmp, vnext);
1204	}
1205
1206	/*
1207	 * Attempt to coalesce with the previous segment.
1208	 */
1209	vprev = vsp->vs_aprev;
1210	if (vprev->vs_type == VMEM_FREE) {
1211		ASSERT(vprev->vs_end == vsp->vs_start);
1212		vmem_freelist_delete(vmp, vprev);
1213		vprev->vs_end = vsp->vs_end;
1214		vmem_seg_destroy(vmp, vsp);
1215		vsp = vprev;
1216	}
1217
1218	/*
1219	 * If the entire span is free, return it to the source.
1220	 */
1221	if (vsp->vs_aprev->vs_import && vmp->vm_source_free != NULL &&
1222	    vsp->vs_aprev->vs_type == VMEM_SPAN &&
1223	    vsp->vs_anext->vs_type == VMEM_SPAN) {
1224		vaddr = (void *)vsp->vs_start;
1225		size = VS_SIZE(vsp);
1226		ASSERT(size == VS_SIZE(vsp->vs_aprev));
1227		vmem_span_destroy(vmp, vsp);
1228		mutex_exit(&vmp->vm_lock);
1229		vmp->vm_source_free(vmp->vm_source, vaddr, size);
1230	} else {
1231		vmem_freelist_insert(vmp, vsp);
1232		mutex_exit(&vmp->vm_lock);
1233	}
1234}
1235
1236/*
1237 * Allocate size bytes from arena vmp.  Returns the allocated address
1238 * on success, NULL on failure.  vmflag specifies VM_SLEEP or VM_NOSLEEP,
1239 * and may also specify best-fit, first-fit, or next-fit allocation policy
1240 * instead of the default instant-fit policy.  VM_SLEEP allocations are
1241 * guaranteed to succeed.
1242 */
1243void *
1244vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
1245{
1246	vmem_seg_t *vsp;
1247	uintptr_t addr;
1248	int hb;
1249	int flist = 0;
1250	uint32_t mtbf;
1251
1252	if (size - 1 < vmp->vm_qcache_max)
1253		return (kmem_cache_alloc(vmp->vm_qcache[(size - 1) >>
1254		    vmp->vm_qshift], vmflag & VM_KMFLAGS));
1255
1256	if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
1257	    (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
1258		return (NULL);
1259
1260	if (vmflag & VM_NEXTFIT)
1261		return (vmem_nextfit_alloc(vmp, size, vmflag));
1262
1263	if (vmflag & (VM_BESTFIT | VM_FIRSTFIT))
1264		return (vmem_xalloc(vmp, size, vmp->vm_quantum, 0, 0,
1265		    NULL, NULL, vmflag));
1266
1267	/*
1268	 * Unconstrained instant-fit allocation from the segment list.
1269	 */
1270	mutex_enter(&vmp->vm_lock);
1271
1272	if (vmp->vm_nsegfree >= VMEM_MINFREE || vmem_populate(vmp, vmflag)) {
1273		if ((size & (size - 1)) == 0)
1274			flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
1275		else if ((hb = highbit(size)) < VMEM_FREELISTS)
1276			flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
1277	}
1278
1279	if (flist-- == 0) {
1280		mutex_exit(&vmp->vm_lock);
1281		return (vmem_xalloc(vmp, size, vmp->vm_quantum,
1282		    0, 0, NULL, NULL, vmflag));
1283	}
1284
1285	ASSERT(size <= (1UL << flist));
1286	vsp = vmp->vm_freelist[flist].vs_knext;
1287	addr = vsp->vs_start;
1288	if (vmflag & VM_ENDALLOC) {
1289		addr += vsp->vs_end - (addr + size);
1290	}
1291	(void) vmem_seg_alloc(vmp, vsp, addr, size);
1292	mutex_exit(&vmp->vm_lock);
1293	return ((void *)addr);
1294}
1295
1296/*
1297 * Free the segment [vaddr, vaddr + size).
1298 */
1299void
1300vmem_free(vmem_t *vmp, void *vaddr, size_t size)
1301{
1302	if (size - 1 < vmp->vm_qcache_max)
1303		kmem_cache_free(vmp->vm_qcache[(size - 1) >> vmp->vm_qshift],
1304		    vaddr);
1305	else
1306		vmem_xfree(vmp, vaddr, size);
1307}
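
/*
 * A hedged usage sketch (the arena and size are illustrative): the size
 * passed to vmem_free() must match the size passed to vmem_alloc(), since
 * the allocation hash is keyed by address and vmem_hash_delete()
 * cross-checks the size.
 *
 *	buf = vmem_alloc(heap_arena, 3 * PAGESIZE, VM_SLEEP);
 *	...
 *	vmem_free(heap_arena, buf, 3 * PAGESIZE);
 */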
1308
1309/*
1310 * Determine whether arena vmp contains the segment [vaddr, vaddr + size).
1311 */
1312int
1313vmem_contains(vmem_t *vmp, void *vaddr, size_t size)
1314{
1315	uintptr_t start = (uintptr_t)vaddr;
1316	uintptr_t end = start + size;
1317	vmem_seg_t *vsp;
1318	vmem_seg_t *seg0 = &vmp->vm_seg0;
1319
1320	mutex_enter(&vmp->vm_lock);
1321	vmp->vm_kstat.vk_contains.value.ui64++;
1322	for (vsp = seg0->vs_knext; vsp != seg0; vsp = vsp->vs_knext) {
1323		vmp->vm_kstat.vk_contains_search.value.ui64++;
1324		ASSERT(vsp->vs_type == VMEM_SPAN);
1325		if (start >= vsp->vs_start && end - 1 <= vsp->vs_end - 1)
1326			break;
1327	}
1328	mutex_exit(&vmp->vm_lock);
1329	return (vsp != seg0);
1330}
1331
1332/*
1333 * Add the span [vaddr, vaddr + size) to arena vmp.
1334 */
1335void *
1336vmem_add(vmem_t *vmp, void *vaddr, size_t size, int vmflag)
1337{
1338	if (vaddr == NULL || size == 0)
1339		panic("vmem_add(%p, %p, %lu): bad arguments",
1340		    (void *)vmp, vaddr, size);
1341
1342	ASSERT(!vmem_contains(vmp, vaddr, size));
1343
1344	mutex_enter(&vmp->vm_lock);
1345	if (vmem_populate(vmp, vmflag))
1346		(void) vmem_span_create(vmp, vaddr, size, 0);
1347	else
1348		vaddr = NULL;
1349	mutex_exit(&vmp->vm_lock);
1350	return (vaddr);
1351}
1352
1353/*
1354 * Walk the vmp arena, applying func to each segment matching typemask.
1355 * If VMEM_REENTRANT is specified, the arena lock is dropped across each
1356 * call to func(); otherwise, it is held for the duration of vmem_walk()
1357 * to ensure a consistent snapshot.  Note that VMEM_REENTRANT callbacks
1358 * are *not* necessarily consistent, so they may only be used when a hint
1359 * is adequate.
1360 */
1361void
1362vmem_walk(vmem_t *vmp, int typemask,
1363	void (*func)(void *, void *, size_t), void *arg)
1364{
1365	vmem_seg_t *vsp;
1366	vmem_seg_t *seg0 = &vmp->vm_seg0;
1367	vmem_seg_t walker;
1368
1369	if (typemask & VMEM_WALKER)
1370		return;
1371
1372	bzero(&walker, sizeof (walker));
1373	walker.vs_type = VMEM_WALKER;
1374
1375	mutex_enter(&vmp->vm_lock);
1376	VMEM_INSERT(seg0, &walker, a);
1377	for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext) {
1378		if (vsp->vs_type & typemask) {
1379			void *start = (void *)vsp->vs_start;
1380			size_t size = VS_SIZE(vsp);
1381			if (typemask & VMEM_REENTRANT) {
1382				vmem_advance(vmp, &walker, vsp);
1383				mutex_exit(&vmp->vm_lock);
1384				func(arg, start, size);
1385				mutex_enter(&vmp->vm_lock);
1386				vsp = &walker;
1387			} else {
1388				func(arg, start, size);
1389			}
1390		}
1391	}
1392	vmem_advance(vmp, &walker, NULL);
1393	mutex_exit(&vmp->vm_lock);
1394}
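
/*
 * A hedged usage sketch (the callback below is illustrative, not part of
 * this file): totaling the in-use space in an arena by walking its
 * allocated segments.
 *
 *	static void
 *	add_size(void *arg, void *start, size_t size)
 *	{
 *		*(size_t *)arg += size;
 *	}
 *
 *	size_t inuse = 0;
 *	vmem_walk(vmp, VMEM_ALLOC, add_size, &inuse);
 */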
1395
1396/*
1397 * Return the total amount of memory whose type matches typemask.  Thus:
1398 *
1399 *	typemask VMEM_ALLOC yields total memory allocated (in use).
1400 *	typemask VMEM_FREE yields total memory free (available).
1401 *	typemask (VMEM_ALLOC | VMEM_FREE) yields total arena size.
1402 */
1403size_t
1404vmem_size(vmem_t *vmp, int typemask)
1405{
1406	uint64_t size = 0;
1407
1408	if (typemask & VMEM_ALLOC)
1409		size += vmp->vm_kstat.vk_mem_inuse.value.ui64;
1410	if (typemask & VMEM_FREE)
1411		size += vmp->vm_kstat.vk_mem_total.value.ui64 -
1412		    vmp->vm_kstat.vk_mem_inuse.value.ui64;
1413	return ((size_t)size);
1414}
1415
1416/*
1417 * Create an arena called name whose initial span is [base, base + size).
1418 * The arena's natural unit of currency is quantum, so vmem_alloc()
1419 * guarantees quantum-aligned results.  The arena may import new spans
1420 * by invoking afunc() on source, and may return those spans by invoking
1421 * ffunc() on source.  To make small allocations fast and scalable,
1422 * the arena offers high-performance caching for each integer multiple
1423 * of quantum up to qcache_max.
1424 */
1425static vmem_t *
1426vmem_create_common(const char *name, void *base, size_t size, size_t quantum,
1427	void *(*afunc)(vmem_t *, size_t, int),
1428	void (*ffunc)(vmem_t *, void *, size_t),
1429	vmem_t *source, size_t qcache_max, int vmflag)
1430{
1431	int i;
1432	size_t nqcache;
1433	vmem_t *vmp, *cur, **vmpp;
1434	vmem_seg_t *vsp;
1435	vmem_freelist_t *vfp;
1436	uint32_t id = atomic_add_32_nv(&vmem_id, 1);
1437
1438	if (vmem_vmem_arena != NULL) {
1439		vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
1440		    vmflag & VM_KMFLAGS);
1441	} else {
1442		ASSERT(id <= VMEM_INITIAL);
1443		vmp = &vmem0[id - 1];
1444	}
1445
1446	/* An identifier arena must inherit from another identifier arena */
1447	ASSERT(source == NULL || ((source->vm_cflags & VMC_IDENTIFIER) ==
1448	    (vmflag & VMC_IDENTIFIER)));
1449
1450	if (vmp == NULL)
1451		return (NULL);
1452	bzero(vmp, sizeof (vmem_t));
1453
1454	(void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
1455	mutex_init(&vmp->vm_lock, NULL, MUTEX_DEFAULT, NULL);
1456	cv_init(&vmp->vm_cv, NULL, CV_DEFAULT, NULL);
1457	vmp->vm_cflags = vmflag;
1458	vmflag &= VM_KMFLAGS;
1459
1460	vmp->vm_quantum = quantum;
1461	vmp->vm_qshift = highbit(quantum) - 1;
1462	nqcache = MIN(qcache_max >> vmp->vm_qshift, VMEM_NQCACHE_MAX);
1463
1464	for (i = 0; i <= VMEM_FREELISTS; i++) {
1465		vfp = &vmp->vm_freelist[i];
1466		vfp->vs_end = 1UL << i;
1467		vfp->vs_knext = (vmem_seg_t *)(vfp + 1);
1468		vfp->vs_kprev = (vmem_seg_t *)(vfp - 1);
1469	}
1470
1471	vmp->vm_freelist[0].vs_kprev = NULL;
1472	vmp->vm_freelist[VMEM_FREELISTS].vs_knext = NULL;
1473	vmp->vm_freelist[VMEM_FREELISTS].vs_end = 0;
1474	vmp->vm_hash_table = vmp->vm_hash0;
1475	vmp->vm_hash_mask = VMEM_HASH_INITIAL - 1;
1476	vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1477
1478	vsp = &vmp->vm_seg0;
1479	vsp->vs_anext = vsp;
1480	vsp->vs_aprev = vsp;
1481	vsp->vs_knext = vsp;
1482	vsp->vs_kprev = vsp;
1483	vsp->vs_type = VMEM_SPAN;
1484
1485	vsp = &vmp->vm_rotor;
1486	vsp->vs_type = VMEM_ROTOR;
1487	VMEM_INSERT(&vmp->vm_seg0, vsp, a);
1488
1489	bcopy(&vmem_kstat_template, &vmp->vm_kstat, sizeof (vmem_kstat_t));
1490
1491	vmp->vm_id = id;
1492	if (source != NULL)
1493		vmp->vm_kstat.vk_source_id.value.ui32 = source->vm_id;
1494	vmp->vm_source = source;
1495	vmp->vm_source_alloc = afunc;
1496	vmp->vm_source_free = ffunc;
1497
1498	/*
1499	 * Some arenas (like vmem_metadata and kmem_metadata) cannot
1500	 * use quantum caching to lower fragmentation.  Instead, we
1501	 * increase their imports, giving a similar effect.
1502	 */
1503	if (vmp->vm_cflags & VMC_NO_QCACHE) {
1504		vmp->vm_min_import =
1505		    VMEM_QCACHE_SLABSIZE(nqcache << vmp->vm_qshift);
1506		nqcache = 0;
1507	}
1508
1509	if (nqcache != 0) {
1510		ASSERT(!(vmflag & VM_NOSLEEP));
1511		vmp->vm_qcache_max = nqcache << vmp->vm_qshift;
1512		for (i = 0; i < nqcache; i++) {
1513			char buf[VMEM_NAMELEN + 21];
1514			(void) sprintf(buf, "%s_%lu", vmp->vm_name,
1515			    (i + 1) * quantum);
1516			vmp->vm_qcache[i] = kmem_cache_create(buf,
1517			    (i + 1) * quantum, quantum, NULL, NULL, NULL,
1518			    NULL, vmp, KMC_QCACHE | KMC_NOTOUCH);
1519		}
1520	}
1521
1522	if ((vmp->vm_ksp = kstat_create("vmem", vmp->vm_id, vmp->vm_name,
1523	    "vmem", KSTAT_TYPE_NAMED, sizeof (vmem_kstat_t) /
1524	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) != NULL) {
1525		vmp->vm_ksp->ks_data = &vmp->vm_kstat;
1526		kstat_install(vmp->vm_ksp);
1527	}
1528
1529	mutex_enter(&vmem_list_lock);
1530	vmpp = &vmem_list;
1531	while ((cur = *vmpp) != NULL)
1532		vmpp = &cur->vm_next;
1533	*vmpp = vmp;
1534	mutex_exit(&vmem_list_lock);
1535
1536	if (vmp->vm_cflags & VMC_POPULATOR) {
1537		ASSERT(vmem_populators < VMEM_INITIAL);
1538		vmem_populator[atomic_add_32_nv(&vmem_populators, 1) - 1] = vmp;
1539		mutex_enter(&vmp->vm_lock);
1540		(void) vmem_populate(vmp, vmflag | VM_PANIC);
1541		mutex_exit(&vmp->vm_lock);
1542	}
1543
1544	if ((base || size) && vmem_add(vmp, base, size, vmflag) == NULL) {
1545		vmem_destroy(vmp);
1546		return (NULL);
1547	}
1548
1549	return (vmp);
1550}
1551
1552vmem_t *
1553vmem_xcreate(const char *name, void *base, size_t size, size_t quantum,
1554    vmem_ximport_t *afunc, vmem_free_t *ffunc, vmem_t *source,
1555    size_t qcache_max, int vmflag)
1556{
1557	ASSERT(!(vmflag & (VMC_POPULATOR | VMC_XALLOC)));
1558	vmflag &= ~(VMC_POPULATOR | VMC_XALLOC);
1559
1560	return (vmem_create_common(name, base, size, quantum,
1561	    (vmem_alloc_t *)afunc, ffunc, source, qcache_max,
1562	    vmflag | VMC_XALLOC));
1563}
1564
1565vmem_t *
1566vmem_create(const char *name, void *base, size_t size, size_t quantum,
1567    vmem_alloc_t *afunc, vmem_free_t *ffunc, vmem_t *source,
1568    size_t qcache_max, int vmflag)
1569{
1570	ASSERT(!(vmflag & (VMC_XALLOC | VMC_XALIGN)));
1571	vmflag &= ~(VMC_XALLOC | VMC_XALIGN);
1572
1573	return (vmem_create_common(name, base, size, quantum,
1574	    afunc, ffunc, source, qcache_max, vmflag));
1575}
1576
1577/*
1578 * Destroy arena vmp.
1579 */
1580void
1581vmem_destroy(vmem_t *vmp)
1582{
1583	vmem_t *cur, **vmpp;
1584	vmem_seg_t *seg0 = &vmp->vm_seg0;
1585	vmem_seg_t *vsp, *anext;
1586	size_t leaked;
1587	int i;
1588
1589	mutex_enter(&vmem_list_lock);
1590	vmpp = &vmem_list;
1591	while ((cur = *vmpp) != vmp)
1592		vmpp = &cur->vm_next;
1593	*vmpp = vmp->vm_next;
1594	mutex_exit(&vmem_list_lock);
1595
1596	for (i = 0; i < VMEM_NQCACHE_MAX; i++)
1597		if (vmp->vm_qcache[i])
1598			kmem_cache_destroy(vmp->vm_qcache[i]);
1599
1600	leaked = vmem_size(vmp, VMEM_ALLOC);
1601	if (leaked != 0)
1602		cmn_err(CE_WARN, "vmem_destroy('%s'): leaked %lu %s",
1603		    vmp->vm_name, leaked, (vmp->vm_cflags & VMC_IDENTIFIER) ?
1604		    "identifiers" : "bytes");
1605
1606	if (vmp->vm_hash_table != vmp->vm_hash0)
1607		vmem_free(vmem_hash_arena, vmp->vm_hash_table,
1608		    (vmp->vm_hash_mask + 1) * sizeof (void *));
1609
1610	/*
1611	 * Give back the segment structures for anything that's left in the
1612	 * arena, e.g. the primary spans and their free segments.
1613	 */
1614	VMEM_DELETE(&vmp->vm_rotor, a);
1615	for (vsp = seg0->vs_anext; vsp != seg0; vsp = anext) {
1616		anext = vsp->vs_anext;
1617		vmem_putseg_global(vsp);
1618	}
1619
1620	while (vmp->vm_nsegfree > 0)
1621		vmem_putseg_global(vmem_getseg(vmp));
1622
1623	kstat_delete(vmp->vm_ksp);
1624
1625	mutex_destroy(&vmp->vm_lock);
1626	cv_destroy(&vmp->vm_cv);
1627	vmem_free(vmem_vmem_arena, vmp, sizeof (vmem_t));
1628}
1629
1630/*
1631 * Resize vmp's hash table to keep the average lookup depth near 1.0.
1632 */
1633static void
1634vmem_hash_rescale(vmem_t *vmp)
1635{
1636	vmem_seg_t **old_table, **new_table, *vsp;
1637	size_t old_size, new_size, h, nseg;
1638
1639	nseg = (size_t)(vmp->vm_kstat.vk_alloc.value.ui64 -
1640	    vmp->vm_kstat.vk_free.value.ui64);
1641
1642	new_size = MAX(VMEM_HASH_INITIAL, 1 << (highbit(3 * nseg + 4) - 2));
1643	old_size = vmp->vm_hash_mask + 1;
1644
1645	if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
1646		return;
1647
1648	new_table = vmem_alloc(vmem_hash_arena, new_size * sizeof (void *),
1649	    VM_NOSLEEP);
1650	if (new_table == NULL)
1651		return;
1652	bzero(new_table, new_size * sizeof (void *));
1653
1654	mutex_enter(&vmp->vm_lock);
1655
1656	old_size = vmp->vm_hash_mask + 1;
1657	old_table = vmp->vm_hash_table;
1658
1659	vmp->vm_hash_mask = new_size - 1;
1660	vmp->vm_hash_table = new_table;
1661	vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1662
1663	for (h = 0; h < old_size; h++) {
1664		vsp = old_table[h];
1665		while (vsp != NULL) {
1666			uintptr_t addr = vsp->vs_start;
1667			vmem_seg_t *next_vsp = vsp->vs_knext;
1668			vmem_seg_t **hash_bucket = VMEM_HASH(vmp, addr);
1669			vsp->vs_knext = *hash_bucket;
1670			*hash_bucket = vsp;
1671			vsp = next_vsp;
1672		}
1673	}
1674
1675	mutex_exit(&vmp->vm_lock);
1676
1677	if (old_table != vmp->vm_hash0)
1678		vmem_free(vmem_hash_arena, old_table,
1679		    old_size * sizeof (void *));
1680}
1681
1682/*
1683 * Perform periodic maintenance on all vmem arenas.
1684 */
1685void
1686vmem_update(void *dummy)
1687{
1688	vmem_t *vmp;
1689
1690	mutex_enter(&vmem_list_lock);
1691	for (vmp = vmem_list; vmp != NULL; vmp = vmp->vm_next) {
1692		/*
1693		 * If threads are waiting for resources, wake them up
1694		 * periodically so they can issue another kmem_reap()
1695		 * to reclaim resources cached by the slab allocator.
1696		 */
1697		cv_broadcast(&vmp->vm_cv);
1698
1699		/*
1700		 * Rescale the hash table to keep the hash chains short.
1701		 */
1702		vmem_hash_rescale(vmp);
1703	}
1704	mutex_exit(&vmem_list_lock);
1705
1706	(void) timeout(vmem_update, dummy, vmem_update_interval * hz);
1707}
1708
1709/*
1710 * Prepare vmem for use.
1711 */
1712vmem_t *
1713vmem_init(const char *heap_name,
1714	void *heap_start, size_t heap_size, size_t heap_quantum,
1715	void *(*heap_alloc)(vmem_t *, size_t, int),
1716	void (*heap_free)(vmem_t *, void *, size_t))
1717{
1718	uint32_t id;
1719	int nseg = VMEM_SEG_INITIAL;
1720	vmem_t *heap;
1721
1722	while (--nseg >= 0)
1723		vmem_putseg_global(&vmem_seg0[nseg]);
1724
1725	heap = vmem_create(heap_name,
1726	    heap_start, heap_size, heap_quantum,
1727	    NULL, NULL, NULL, 0,
1728	    VM_SLEEP | VMC_POPULATOR);
1729
1730	vmem_metadata_arena = vmem_create("vmem_metadata",
1731	    NULL, 0, heap_quantum,
1732	    vmem_alloc, vmem_free, heap, 8 * heap_quantum,
1733	    VM_SLEEP | VMC_POPULATOR | VMC_NO_QCACHE);
1734
1735	vmem_seg_arena = vmem_create("vmem_seg",
1736	    NULL, 0, heap_quantum,
1737	    heap_alloc, heap_free, vmem_metadata_arena, 0,
1738	    VM_SLEEP | VMC_POPULATOR);
1739
1740	vmem_hash_arena = vmem_create("vmem_hash",
1741	    NULL, 0, 8,
1742	    heap_alloc, heap_free, vmem_metadata_arena, 0,
1743	    VM_SLEEP);
1744
1745	vmem_vmem_arena = vmem_create("vmem_vmem",
1746	    vmem0, sizeof (vmem0), 1,
1747	    heap_alloc, heap_free, vmem_metadata_arena, 0,
1748	    VM_SLEEP);
1749
1750	for (id = 0; id < vmem_id; id++)
1751		(void) vmem_xalloc(vmem_vmem_arena, sizeof (vmem_t),
1752		    1, 0, 0, &vmem0[id], &vmem0[id + 1],
1753		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
1754
1755	return (heap);
1756}
1757