/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * Copyright (c) 2013 EMC Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From:
 *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
 *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_vmem.c 253596 2013-07-24 08:02:56Z glebius $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>

#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * NBBY)

#define	VMEM_HASHSIZE_MIN	16
#define	VMEM_HASHSIZE_MAX	131072

#define	VMEM_QCACHE_IDX_MAX	16

#define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT)

#define	VMEM_FLAGS						\
    (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | M_BESTFIT | M_FIRSTFIT)

#define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)

#define	QC_NAME_MAX	16

/*
 * Data structures private to vmem.
 */
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");

typedef struct vmem_btag bt_t;

TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

struct qcache {
	uma_zone_t	qc_cache;
	vmem_t 		*qc_vmem;
	vmem_size_t	qc_size;
	char		qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))

#define	VMEM_NAME_MAX	16

/* vmem arena */
struct vmem {
	struct mtx_padalign	vm_lock;
	struct cv		vm_cv;
	char			vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem)	vm_alllist;
	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
	struct vmem_seglist	vm_seglist;
	struct vmem_hashlist	*vm_hashlist;
	vmem_size_t		vm_hashsize;

	/* Constant after init */
	vmem_size_t		vm_qcache_max;
	vmem_size_t		vm_quantum_mask;
	vmem_size_t		vm_import_quantum;
	int			vm_quantum_shift;

	/* Written on alloc/free */
	LIST_HEAD(, vmem_btag)	vm_freetags;
	int			vm_nfreetags;
	int			vm_nbusytag;
	vmem_size_t		vm_inuse;
	vmem_size_t		vm_size;

	/* Used on import. */
	vmem_import_t		*vm_importfn;
	vmem_release_t		*vm_releasefn;
	void			*vm_arg;

	/* Space exhaustion callback. */
	vmem_reclaim_t		*vm_reclaimfn;

	/* quantum cache */
	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
};

/* boundary tag */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;
	vmem_size_t	bt_size;
	int		bt_type;
};

#define	BT_TYPE_SPAN		1	/* Allocated from importfn */
#define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
#define	BT_TYPE_FREE		3	/* Available space. */
#define	BT_TYPE_BUSY		4	/* Used space. */
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)

#if defined(DIAGNOSTIC)
static void vmem_check(vmem_t *);
#endif

static struct callout	vmem_periodic_ch;
static int		vmem_periodic_interval;
static struct task	vmem_periodic_wk;

static struct mtx_padalign vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);

/* ---- misc */
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)


#define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);

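/*
 * VMEM_ALIGNUP() rounds addr up to the next multiple of align, and
 * VMEM_CROSS_P() is true if the range [addr1, addr2] spans a multiple
 * of boundary.  Both rely on align/boundary being powers of two; a
 * boundary of zero never "crosses", which is how nocross == 0 disables
 * that constraint.
 */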
#define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define	SIZE2ORDER(size)	((int)flsl(size) - 1)
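/*
 * For example, SIZE2ORDER(1) == 0, SIZE2ORDER(7) == 2 and SIZE2ORDER(8) == 3,
 * while ORDER2SIZE(3) == 8; a size that is not a power of two maps to the
 * order of its highest set bit.
 */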

/*
 * Maximum number of boundary tags that may be required to satisfy an
 * allocation.  Two may be required to import.  Another two may be
 * required to clip edges.
 */
#define	BT_MAXALLOC	4

/*
 * Max free limits the number of locally cached boundary tags.  We
 * just want to avoid hitting the zone allocator for every call.
 */
#define BT_MAXFREE	(BT_MAXALLOC * 8)

/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;

/* boot time arena storage. */
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static int
bt_fill(vmem_t *vm, int flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * Loop until we meet the reserve.  To minimize the lock shuffle
	 * and prevent simultaneous fills we first try a NOWAIT regardless
	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
	 * holding a vmem lock.
	 */
	while (vm->vm_nfreetags < BT_MAXALLOC) {
		bt = uma_zalloc(vmem_bt_zone,
		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
		if (bt == NULL) {
			VMEM_UNLOCK(vm);
			bt = uma_zalloc(vmem_bt_zone, flags);
			VMEM_LOCK(vm);
			if (bt == NULL && (flags & M_NOWAIT) != 0)
				break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags < BT_MAXALLOC)
		return ENOMEM;

	return 0;
}

/*
 * Pop a tag off of the freetag stack.
 */
static bt_t *
bt_alloc(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	bt = LIST_FIRST(&vm->vm_freetags);
	MPASS(bt != NULL);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}

/*
 * Trim the per-vmem free list.  Returns with the lock released to
 * avoid allocator recursions.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	LIST_HEAD(, vmem_btag) freetags;
	bt_t *bt;

	LIST_INIT(&freetags);
	VMEM_ASSERT_LOCKED(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
	}
	VMEM_UNLOCK(vm);
	while ((bt = LIST_FIRST(&freetags)) != NULL) {
		LIST_REMOVE(bt, bt_freelist);
		uma_zfree(vmem_bt_zone, bt);
	}
}

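/*
 * Return a boundary tag to the arena's local free tag list.
 */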
static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For M_FIRSTFIT, return the list on which every block is guaranteed to be
 * large enough for the requested size.  Otherwise, return the smallest list
 * that may contain blocks large enough for the requested size.
 */
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

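/*
 * Return the hash bucket for busy tags, keyed on the tag's start address.
 */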
static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), 0);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

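/*
 * Find the busy tag whose segment starts at addr, or NULL if there is none.
 */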
static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

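/*
 * Remove a tag from the busy hash and credit its size back to the arena's
 * in-use accounting.
 */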
static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

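/*
 * Insert a busy tag into the hash and charge its size to the arena's
 * in-use accounting.
 */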
static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
	bt_free(vm, bt);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

/*
 * Import from the arena into the quantum cache in UMA.
 */
static int
qc_import(void *arg, void **store, int cnt, int flags)
{
	qcache_t *qc;
	vmem_addr_t addr;
	int i;

	qc = arg;
	flags |= M_BESTFIT;
	for (i = 0; i < cnt; i++) {
		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
			break;
		store[i] = (void *)addr;
		/* Only guarantee one allocation. */
		flags &= ~M_WAITOK;
		flags |= M_NOWAIT;
	}
	return i;
}

/*
 * Release memory from the UMA cache to the arena.
 */
static void
qc_release(void *arg, void **store, int cnt)
{
	qcache_t *qc;
	int i;

	qc = arg;
	for (i = 0; i < cnt; i++)
		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}

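/*
 * Create the per-size UMA caches ("quantum caches") that front the arena
 * for allocations of up to qcache_max bytes.
 */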
static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
	qcache_t *qc;
	vmem_size_t size;
	int qcache_idx_max;
	int i;

	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
	    VMEM_QCACHE_IDX_MAX);
	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++) {
		qc = &vm->vm_qcache[i];
		size = (i + 1) << vm->vm_quantum_shift;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_vmem = vm;
		qc->qc_size = size;
		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
		    UMA_ZONE_VM);
		MPASS(qc->qc_cache);
	}
}

static void
qc_destroy(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zdestroy(vm->vm_qcache[i].qc_cache);
}

static void
qc_drain(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		zone_drain(vm->vm_qcache[i].qc_cache);
}

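/*
 * Early boot initialization: set up the global arena list lock and the
 * UMA zone from which boundary tags are allocated.
 */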
void
vmem_startup(void)
{

	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);
}

/* ---- rehash */

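/*
 * Replace the busy-tag hash table with one of newhashsize buckets and
 * rehash every busy tag into it.  The old table is freed unless it is the
 * statically allocated vm_hash0.
 */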
static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	vmem_size_t oldhashsize;

	MPASS(newhashsize > 0);

	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
	    M_VMEM, M_NOWAIT);
	if (newhashlist == NULL)
		return ENOMEM;
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != vm->vm_hash0) {
		free(oldhashlist, M_VMEM);
	}

	return 0;
}

static void
vmem_periodic_kick(void *dummy)
{

	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}

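/*
 * Periodic housekeeping task: optionally verify arena consistency
 * (DIAGNOSTIC) and resize each arena's busy-tag hash table to track its
 * number of busy tags.
 */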
static void
vmem_periodic(void *unused, int pending)
{
	vmem_t *vm;
	vmem_size_t desired;
	vmem_size_t current;

	mtx_lock(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
		/* Convenient time to verify vmem state. */
		VMEM_LOCK(vm);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
#endif
		desired = 1 << flsl(vm->vm_nbusytag);
		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
		    VMEM_HASHSIZE_MAX);
		current = vm->vm_hashsize;

		/* Grow in powers of two.  Shrink less aggressively. */
		if (desired >= current * 2 || desired * 4 <= current)
			vmem_rehash(vm, desired);
	}
	mtx_unlock(&vmem_list_lock);

	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}

static void
vmem_start_callout(void *unused)
{

	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
	vmem_periodic_interval = hz * 10;
	callout_init(&vmem_periodic_ch, CALLOUT_MPSAFE);
	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);

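/*
 * Add a span covering [addr, addr + size) to the arena, together with a
 * free segment spanning the same range.  Consumes two boundary tags.
 */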
static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
	bt_t *btspan;
	bt_t *btfree;

	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm);
	btspan->bt_type = type;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree = bt_alloc(vm);
	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	vm->vm_size += size;
}

static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

	/*
	 * Drain per-cpu quantum caches.
	 */
	qc_destroy(vm);

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
		free(vm->vm_hashlist, M_VMEM);

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	free(vm, M_VMEM);
}

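/*
 * Import a new span of at least size bytes from the arena's backing
 * importfn, rounding the request up to the import quantum.  BT_MAXALLOC
 * tags are hidden across the unlocked callback so that the new span and
 * the allocation that prompted the import can always be recorded.
 */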
static int
vmem_import(vmem_t *vm, vmem_size_t size, int flags)
{
	vmem_addr_t addr;
	int error;

	if (vm->vm_importfn == NULL)
		return EINVAL;

	size = roundup(size, vm->vm_import_quantum);

	/*
	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
	 * span and the tag we want to allocate from it.
	 */
	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
	vm->vm_nfreetags -= BT_MAXALLOC;
	VMEM_UNLOCK(vm);
	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	VMEM_LOCK(vm);
	vm->vm_nfreetags += BT_MAXALLOC;
	if (error)
		return ENOMEM;

	vmem_add1(vm, addr, size, BT_TYPE_SPAN);

	return 0;
}

/*
 * vmem_fit: check whether a boundary tag can satisfy the given restrictions.
 *
 * It is the caller's responsibility to ensure the region is big enough
 * before calling us.
 */
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
    vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	MPASS(size > 0);
	MPASS(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr)
		end = maxaddr;
	if (start > end)
		return (ENOMEM);

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start)
		start += align;
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		MPASS(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		MPASS((start & (align - 1)) == phase);
		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
		MPASS(minaddr <= start);
		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
		MPASS(bt->bt_start <= start);
		MPASS(BT_END(bt) - start >= size - 1);
		*addrp = start;

		return (0);
	}
	return (ENOMEM);
}

/*
 * vmem_clip:  Trim the boundary tag edges to the requested start and size.
 */
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
	bt_t *btnew;
	bt_t *btprev;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_FREE);
	MPASS(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btprev = bt_alloc(vm);
		btprev->bt_type = BT_TYPE_FREE;
		btprev->bt_start = bt->bt_start;
		btprev->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btprev->bt_size;
		bt_insfree(vm, btprev);
		bt_insseg(vm, btprev,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
	}
	MPASS(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew = bt_alloc(vm);
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		bt = btnew;
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
	}
	MPASS(bt->bt_size >= size);
	bt->bt_type = BT_TYPE_BUSY;
}

/* ---- vmem API */

void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
     vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{

	VMEM_LOCK(vm);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_import_quantum = import_quantum;
	VMEM_UNLOCK(vm);
}

void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{

	VMEM_LOCK(vm);
	vm->vm_reclaimfn = reclaimfn;
	VMEM_UNLOCK(vm);
}

/*
 * vmem_init: Initializes vmem arena.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	int i;

	MPASS(quantum > 0);

	bzero(vm, sizeof(*vm));

	VMEM_CONDVAR_INIT(vm, name);
	VMEM_LOCK_INIT(vm, name);
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	MPASS(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
	qc_init(vm, qcache_max);

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
	vm->vm_hashlist = vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

	mtx_lock(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	return vm;
}

/*
 * vmem_create: create an arena.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{

	vmem_t *vm;

	vm = malloc(sizeof(*vm), M_VMEM, flags & (M_WAITOK|M_NOWAIT));
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL) {
		free(vm, M_VMEM);
		return (NULL);
	}
	return (vm);
}

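/*
 * Illustrative example only (the arena name, base, and sizes below are
 * made up, not taken from any existing consumer): create a byte-granular
 * arena, carve out a page-aligned chunk, and tear everything down again.
 *
 *	vmem_t *arena;
 *	vmem_addr_t addr;
 *
 *	arena = vmem_create("example", 0x1000000, 0x2000000, 1, 0,
 *	    M_WAITOK);
 *	if (vmem_xalloc(arena, 8192, 4096, 0, 0, VMEM_ADDR_MIN,
 *	    VMEM_ADDR_MAX, M_BESTFIT | M_WAITOK, &addr) == 0)
 *		vmem_xfree(arena, addr, 8192);
 *	vmem_destroy(arena);
 */
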
void
vmem_destroy(vmem_t *vm)
{

	mtx_lock(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc: allocate resource from the arena.
 */
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	const int strat __unused = flags & VMEM_FITMASK;
	qcache_t *qc;

	flags &= VMEM_FLAGS;
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags);
		if (*addrp == 0)
			return (ENOMEM);
		return (0);
	}

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}

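/*
 * vmem_xalloc: allocate a resource from the arena subject to alignment,
 * phase, nocross (boundary-crossing), and address range constraints.
 * Sleeps until the request can be satisfied unless M_NOWAIT is given.
 */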
int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
    vmem_addr_t *addrp)
{
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	vmem_size_t avail;
	bt_t *bt;
	int error;
	int strat;

	flags &= VMEM_FLAGS;
	strat = flags & VMEM_FITMASK;
	MPASS(size0 > 0);
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
	MPASS((align & vm->vm_quantum_mask) == 0);
	MPASS((align & (align - 1)) == 0);
	MPASS((phase & vm->vm_quantum_mask) == 0);
	MPASS((nocross & vm->vm_quantum_mask) == 0);
	MPASS((nocross & (nocross - 1)) == 0);
	MPASS((align == 0 && phase == 0) || phase < align);
	MPASS(nocross == 0 || nocross >= size);
	MPASS(minaddr <= maxaddr);
	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0)
		align = vm->vm_quantum_mask + 1;

	*addrp = 0;
	end = &vm->vm_freelist[VMEM_MAXORDER];
	/*
	 * choose a free block from which we allocate.
	 */
	first = bt_freehead_toalloc(vm, size, strat);
	VMEM_LOCK(vm);
	for (;;) {
		/*
		 * Make sure we have enough tags to complete the
		 * operation.
		 */
		if (vm->vm_nfreetags < BT_MAXALLOC &&
		    bt_fill(vm, flags) != 0) {
			error = ENOMEM;
			break;
		}
		/*
		 * Scan freelists looking for a tag that satisfies the
		 * allocation.  If we're doing BESTFIT we may encounter
		 * sizes below the request.  If we're doing FIRSTFIT we
		 * inspect only the first element from each list.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					error = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, addrp);
					if (error == 0) {
						vmem_clip(vm, bt, *addrp, size);
						goto out;
					}
				}
				/* FIRST skips to the next list. */
				if (strat == M_FIRSTFIT)
					break;
			}
		}
		/*
		 * Retry if the fast algorithm failed.
		 */
		if (strat == M_FIRSTFIT) {
			strat = M_BESTFIT;
			first = bt_freehead_toalloc(vm, size, strat);
			continue;
		}
		/*
		 * XXX it is possible to fail to meet restrictions with the
		 * imported region.  It is up to the user to specify the
		 * import quantum such that it can satisfy any allocation.
		 */
		if (vmem_import(vm, size, flags) == 0)
			continue;

		/*
		 * Try to free some space from the quantum cache or reclaim
		 * functions if available.
		 */
		if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
			avail = vm->vm_size - vm->vm_inuse;
			VMEM_UNLOCK(vm);
			if (vm->vm_qcache_max != 0)
				qc_drain(vm);
			if (vm->vm_reclaimfn != NULL)
				vm->vm_reclaimfn(vm, flags);
			VMEM_LOCK(vm);
			/* If we were successful retry even NOWAIT. */
			if (vm->vm_size - vm->vm_inuse > avail)
				continue;
		}
		if ((flags & M_NOWAIT) != 0) {
			error = ENOMEM;
			break;
		}
		VMEM_CONDVAR_WAIT(vm);
	}
out:
	VMEM_UNLOCK(vm);
	if (error != 0 && (flags & M_NOWAIT) == 0)
		panic("failed to allocate waiting allocation\n");

	return (error);
}

/*
 * vmem_free: free the resource to the arena.
 */
void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	qcache_t *qc;
	MPASS(size > 0);

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		uma_zfree(qc->qc_cache, (void *)addr);
	} else
		vmem_xfree(vm, addr, size);
}

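/*
 * vmem_xfree: free a segment previously allocated with vmem_xalloc(),
 * coalescing it with adjacent free segments.  If this empties a span that
 * came from the import function, hand the whole span back through the
 * release function.
 */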
void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	MPASS(size > 0);

	VMEM_LOCK(vm);
	bt = bt_lookupbusy(vm, addr);
	MPASS(bt != NULL);
	MPASS(bt->bt_start == addr);
	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	MPASS(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	MPASS(t != NULL);
	MPASS(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		MPASS(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_remseg(vm, t);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
	}
}

/*
 * vmem_add: add a static span of memory to the arena.
 */
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
{
	int error;

	error = 0;
	flags &= VMEM_FLAGS;
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
	else
		error = ENOMEM;
	VMEM_UNLOCK(vm);

	return (error);
}

/*
 * vmem_size: report the arena's allocated, free, or total size.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	default:
		panic("vmem_size");
	}
}

/* ---- debug */

#if defined(DDB) || defined(DIAGNOSTIC)

static void bt_dump(const bt_t *, int (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{

	switch (type) {
	case BT_TYPE_BUSY:
		return "busy";
	case BT_TYPE_FREE:
		return "free";
	case BT_TYPE_SPAN:
		return "span";
	case BT_TYPE_SPAN_STATIC:
		return "static span";
	default:
		break;
	}
	return "BOGUS";
}

static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{

	(*pr)("\t%p: %jx %jx, %d(%s)\n",
	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, int (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(DIAGNOSTIC) */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

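/*
 * DDB helper: report which arena and which segment (allocated or free)
 * contain the given address.
 */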
void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, int (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}
#endif /* defined(DDB) */

#define vmem_printf printf

#if defined(DIAGNOSTIC)

static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	MPASS(vm != NULL);

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start > BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, vmem_printf);
			return false;
		}
	}
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start <= BT_END(bt2) &&
			    bt2->bt_start <= BT_END(bt)) {
				printf("overlapping tags\n");
				bt_dump(bt, vmem_printf);
				bt_dump(bt2, vmem_printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(DIAGNOSTIC) */
1373