1/*-
2 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
3 * Copyright (c) 2013 EMC Corp.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28/*
29 * From:
30 * $NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
31 * $NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
32 */
33
34/*
35 * reference:
36 * - Magazines and Vmem: Extending the Slab Allocator
37 * to Many CPUs and Arbitrary Resources
38 * http://www.usenix.org/event/usenix01/bonwick.html
39 */
40
41#include <sys/cdefs.h>
42__FBSDID("$FreeBSD: head/sys/kern/subr_vmem.c 280805 2015-03-29 10:02:29Z mav $");
43
44#include "opt_ddb.h"
45
46#include <sys/param.h>
47#include <sys/systm.h>
48#include <sys/kernel.h>
49#include <sys/queue.h>
50#include <sys/callout.h>
51#include <sys/hash.h>
52#include <sys/lock.h>
53#include <sys/malloc.h>
54#include <sys/mutex.h>
55#include <sys/smp.h>
56#include <sys/condvar.h>
57#include <sys/sysctl.h>
58#include <sys/taskqueue.h>
59#include <sys/vmem.h>
60
61#include "opt_vm.h"
62
63#include <vm/uma.h>
64#include <vm/vm.h>
65#include <vm/pmap.h>
66#include <vm/vm_map.h>
67#include <vm/vm_object.h>
68#include <vm/vm_kern.h>
69#include <vm/vm_extern.h>
70#include <vm/vm_param.h>
71#include <vm/vm_pageout.h>
72
73#define VMEM_OPTORDER 5
74#define VMEM_OPTVALUE (1 << VMEM_OPTORDER)
75#define VMEM_MAXORDER \
76 (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)
77
78#define VMEM_HASHSIZE_MIN 16
79#define VMEM_HASHSIZE_MAX 131072
80
81#define VMEM_QCACHE_IDX_MAX 16
82
83#define VMEM_FITMASK (M_BESTFIT | M_FIRSTFIT)
84
85#define VMEM_FLAGS \
86 (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | M_BESTFIT | M_FIRSTFIT)
87
88#define BT_FLAGS (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)
89
90#define QC_NAME_MAX 16
91
92/*
93 * Data structures private to vmem.
94 */
95MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");
96
97typedef struct vmem_btag bt_t;
98
99TAILQ_HEAD(vmem_seglist, vmem_btag);
100LIST_HEAD(vmem_freelist, vmem_btag);
101LIST_HEAD(vmem_hashlist, vmem_btag);
102
103struct qcache {
104 uma_zone_t qc_cache;
105 vmem_t *qc_vmem;
106 vmem_size_t qc_size;
107 char qc_name[QC_NAME_MAX];
108};
109typedef struct qcache qcache_t;
110#define QC_POOL_TO_QCACHE(pool) ((qcache_t *)(pool->pr_qcache))
111
112#define VMEM_NAME_MAX 16
113
114/* vmem arena */
115struct vmem {
116 struct mtx_padalign vm_lock;
117 struct cv vm_cv;
118 char vm_name[VMEM_NAME_MAX+1];
119 LIST_ENTRY(vmem) vm_alllist;
120 struct vmem_hashlist vm_hash0[VMEM_HASHSIZE_MIN];
121 struct vmem_freelist vm_freelist[VMEM_MAXORDER];
122 struct vmem_seglist vm_seglist;
123 struct vmem_hashlist *vm_hashlist;
124 vmem_size_t vm_hashsize;
125
126 /* Constant after init */
127 vmem_size_t vm_qcache_max;
128 vmem_size_t vm_quantum_mask;
129 vmem_size_t vm_import_quantum;
130 int vm_quantum_shift;
131
132 /* Written on alloc/free */
133 LIST_HEAD(, vmem_btag) vm_freetags;
134 int vm_nfreetags;
135 int vm_nbusytag;
136 vmem_size_t vm_inuse;
137 vmem_size_t vm_size;
138
139 /* Used on import. */
140 vmem_import_t *vm_importfn;
141 vmem_release_t *vm_releasefn;
142 void *vm_arg;
143
144 /* Space exhaustion callback. */
145 vmem_reclaim_t *vm_reclaimfn;
146
147 /* quantum cache */
148 qcache_t vm_qcache[VMEM_QCACHE_IDX_MAX];
149};
150
151/* boundary tag */
152struct vmem_btag {
153 TAILQ_ENTRY(vmem_btag) bt_seglist;
154 union {
155 LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
156 LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
157 } bt_u;
158#define bt_hashlist bt_u.u_hashlist
159#define bt_freelist bt_u.u_freelist
160 vmem_addr_t bt_start;
161 vmem_size_t bt_size;
162 int bt_type;
163};
164
165#define BT_TYPE_SPAN 1 /* Allocated from importfn */
166#define BT_TYPE_SPAN_STATIC 2 /* vmem_add() or create. */
167#define BT_TYPE_FREE 3 /* Available space. */
168#define BT_TYPE_BUSY 4 /* Used space. */
169#define BT_ISSPAN_P(bt) ((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
170
171#define BT_END(bt) ((bt)->bt_start + (bt)->bt_size - 1)
172
173#if defined(DIAGNOSTIC)
174static int enable_vmem_check = 1;
175SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
176 &enable_vmem_check, 0, "Enable vmem check");
177static void vmem_check(vmem_t *);
178#endif
179
180static struct callout vmem_periodic_ch;
181static int vmem_periodic_interval;
182static struct task vmem_periodic_wk;
183
184static struct mtx_padalign vmem_list_lock;
185static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
186
187/* ---- misc */
188#define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan)
189#define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv)
190#define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock)
191#define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv)
192
193
194#define VMEM_LOCK(vm) mtx_lock(&vm->vm_lock)
195#define VMEM_TRYLOCK(vm) mtx_trylock(&vm->vm_lock)
196#define VMEM_UNLOCK(vm) mtx_unlock(&vm->vm_lock)
197#define VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
198#define VMEM_LOCK_DESTROY(vm) mtx_destroy(&vm->vm_lock)
199#define VMEM_ASSERT_LOCKED(vm) mtx_assert(&vm->vm_lock, MA_OWNED);
200
201#define VMEM_ALIGNUP(addr, align) (-(-(addr) & -(align)))
202
203#define VMEM_CROSS_P(addr1, addr2, boundary) \
204 ((((addr1) ^ (addr2)) & -(boundary)) != 0)
205
206#define ORDER2SIZE(order) ((order) < VMEM_OPTVALUE ? ((order) + 1) : \
207 (vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
208#define SIZE2ORDER(size) ((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
209 (flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
210
211/*
212 * Maximum number of boundary tags that may be required to satisfy an
213 * allocation. Two may be required to import. Another two may be
214 * required to clip edges.
215 */
216#define BT_MAXALLOC 4
217
218/*
219 * Max free limits the number of locally cached boundary tags. We
220 * just want to avoid hitting the zone allocator for every call.
221 */
222#define BT_MAXFREE (BT_MAXALLOC * 8)
223
224/* Allocator for boundary tags. */
225static uma_zone_t vmem_bt_zone;
226
227/* boot time arena storage. */
228static struct vmem kernel_arena_storage;
229static struct vmem kmem_arena_storage;
230static struct vmem buffer_arena_storage;
231static struct vmem transient_arena_storage;
232vmem_t *kernel_arena = &kernel_arena_storage;
233vmem_t *kmem_arena = &kmem_arena_storage;
234vmem_t *buffer_arena = &buffer_arena_storage;
235vmem_t *transient_arena = &transient_arena_storage;
236
237#ifdef DEBUG_MEMGUARD
238static struct vmem memguard_arena_storage;
239vmem_t *memguard_arena = &memguard_arena_storage;
240#endif
241
242/*
243 * Fill the vmem's boundary tag cache. We guarantee that boundary tag
244 * allocation will not fail once bt_fill() passes. To do so we cache
245 * at least the maximum possible tag allocations in the arena.
246 */
247static int
248bt_fill(vmem_t *vm, int flags)
249{
250 bt_t *bt;
251
252 VMEM_ASSERT_LOCKED(vm);
253
254 /*
255 * Only allow the kmem arena to dip into reserve tags. It is the
256 * vmem where new tags come from.
257 */
258 flags &= BT_FLAGS;
259 if (vm != kmem_arena)
260 flags &= ~M_USE_RESERVE;
261
262 /*
263 * Loop until we meet the reserve. To minimize the lock shuffle
264 * and prevent simultaneous fills we first try a NOWAIT regardless
265 * of the caller's flags. Specify M_NOVM so we don't recurse while
266 * holding a vmem lock.
267 */
268 while (vm->vm_nfreetags < BT_MAXALLOC) {
269 bt = uma_zalloc(vmem_bt_zone,
270 (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
271 if (bt == NULL) {
272 VMEM_UNLOCK(vm);
273 bt = uma_zalloc(vmem_bt_zone, flags);
274 VMEM_LOCK(vm);
275 if (bt == NULL && (flags & M_NOWAIT) != 0)
276 break;
277 }
278 LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
279 vm->vm_nfreetags++;
280 }
281
282 if (vm->vm_nfreetags < BT_MAXALLOC)
283 return ENOMEM;
284
285 return 0;
286}
287
288/*
289 * Pop a tag off of the freetag stack.
290 */
291static bt_t *
292bt_alloc(vmem_t *vm)
293{
294 bt_t *bt;
295
296 VMEM_ASSERT_LOCKED(vm);
297 bt = LIST_FIRST(&vm->vm_freetags);
298 MPASS(bt != NULL);
299 LIST_REMOVE(bt, bt_freelist);
300 vm->vm_nfreetags--;
301
302 return bt;
303}
304
305/*
306 * Trim the per-vmem free list. Returns with the lock released to
307 * avoid allocator recursions.
308 */
309static void
310bt_freetrim(vmem_t *vm, int freelimit)
311{
312 LIST_HEAD(, vmem_btag) freetags;
313 bt_t *bt;
314
315 LIST_INIT(&freetags);
316 VMEM_ASSERT_LOCKED(vm);
317 while (vm->vm_nfreetags > freelimit) {
318 bt = LIST_FIRST(&vm->vm_freetags);
319 LIST_REMOVE(bt, bt_freelist);
320 vm->vm_nfreetags--;
321 LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
322 }
323 VMEM_UNLOCK(vm);
324 while ((bt = LIST_FIRST(&freetags)) != NULL) {
325 LIST_REMOVE(bt, bt_freelist);
326 uma_zfree(vmem_bt_zone, bt);
327 }
328}
329
330static inline void
331bt_free(vmem_t *vm, bt_t *bt)
332{
333
334 VMEM_ASSERT_LOCKED(vm);
335 MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
336 LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
337 vm->vm_nfreetags++;
338}
339
340/*
341 * freelist[0] ... [1, 1]
342 * freelist[1] ... [2, 2]
343 * :
344 * freelist[29] ... [30, 30]
345 * freelist[30] ... [31, 31]
346 * freelist[31] ... [32, 63]
 347 * freelist[32] ... [64, 127]
348 * :
349 * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
350 * :
351 */
352
353static struct vmem_freelist *
354bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
355{
356 const vmem_size_t qsize = size >> vm->vm_quantum_shift;
357 const int idx = SIZE2ORDER(qsize);
358
359 MPASS(size != 0 && qsize != 0);
360 MPASS((size & vm->vm_quantum_mask) == 0);
361 MPASS(idx >= 0);
362 MPASS(idx < VMEM_MAXORDER);
363
364 return &vm->vm_freelist[idx];
365}
366
367/*
368 * bt_freehead_toalloc: return the freelist for the given size and allocation
369 * strategy.
370 *
 371 * For M_FIRSTFIT, return the list in which any block is large enough
 372 * for the requested size.  Otherwise, return the list which can have
 373 * blocks large enough for the requested size.
374 */
375static struct vmem_freelist *
376bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
377{
378 const vmem_size_t qsize = size >> vm->vm_quantum_shift;
379 int idx = SIZE2ORDER(qsize);
380
381 MPASS(size != 0 && qsize != 0);
382 MPASS((size & vm->vm_quantum_mask) == 0);
383
384 if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
385 idx++;
386 /* check too large request? */
387 }
388 MPASS(idx >= 0);
389 MPASS(idx < VMEM_MAXORDER);
390
391 return &vm->vm_freelist[idx];
392}
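
/*
 * For example (VMEM_OPTORDER == 5), a request of 65 quanta has
 * SIZE2ORDER(65) == 32, but freelist[32] holds blocks of 64..127
 * quanta and so may contain blocks that are too small.  M_BESTFIT
 * scans that list anyway; M_FIRSTFIT starts one list higher, at
 * freelist[33], where every block holds at least 128 quanta.  A
 * request of exactly 64 quanta, in contrast, lets M_FIRSTFIT use
 * freelist[32] directly, since every block there is large enough.
 */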
393
394/* ---- boundary tag hash */
395
396static struct vmem_hashlist *
397bt_hashhead(vmem_t *vm, vmem_addr_t addr)
398{
399 struct vmem_hashlist *list;
400 unsigned int hash;
401
402 hash = hash32_buf(&addr, sizeof(addr), 0);
403 list = &vm->vm_hashlist[hash % vm->vm_hashsize];
404
405 return list;
406}
407
408static bt_t *
409bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
410{
411 struct vmem_hashlist *list;
412 bt_t *bt;
413
414 VMEM_ASSERT_LOCKED(vm);
415 list = bt_hashhead(vm, addr);
416 LIST_FOREACH(bt, list, bt_hashlist) {
417 if (bt->bt_start == addr) {
418 break;
419 }
420 }
421
422 return bt;
423}
424
425static void
426bt_rembusy(vmem_t *vm, bt_t *bt)
427{
428
429 VMEM_ASSERT_LOCKED(vm);
430 MPASS(vm->vm_nbusytag > 0);
431 vm->vm_inuse -= bt->bt_size;
432 vm->vm_nbusytag--;
433 LIST_REMOVE(bt, bt_hashlist);
434}
435
436static void
437bt_insbusy(vmem_t *vm, bt_t *bt)
438{
439 struct vmem_hashlist *list;
440
441 VMEM_ASSERT_LOCKED(vm);
442 MPASS(bt->bt_type == BT_TYPE_BUSY);
443
444 list = bt_hashhead(vm, bt->bt_start);
445 LIST_INSERT_HEAD(list, bt, bt_hashlist);
446 vm->vm_nbusytag++;
447 vm->vm_inuse += bt->bt_size;
448}
449
450/* ---- boundary tag list */
451
452static void
453bt_remseg(vmem_t *vm, bt_t *bt)
454{
455
456 TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
457 bt_free(vm, bt);
458}
459
460static void
461bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
462{
463
464 TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
465}
466
467static void
468bt_insseg_tail(vmem_t *vm, bt_t *bt)
469{
470
471 TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
472}
473
474static void
475bt_remfree(vmem_t *vm, bt_t *bt)
476{
477
478 MPASS(bt->bt_type == BT_TYPE_FREE);
479
480 LIST_REMOVE(bt, bt_freelist);
481}
482
483static void
484bt_insfree(vmem_t *vm, bt_t *bt)
485{
486 struct vmem_freelist *list;
487
488 list = bt_freehead_tofree(vm, bt->bt_size);
489 LIST_INSERT_HEAD(list, bt, bt_freelist);
490}
491
492/* ---- vmem internal functions */
493
494/*
495 * Import from the arena into the quantum cache in UMA.
496 */
497static int
498qc_import(void *arg, void **store, int cnt, int flags)
499{
500 qcache_t *qc;
501 vmem_addr_t addr;
502 int i;
503
504 qc = arg;
505 if ((flags & VMEM_FITMASK) == 0)
506 flags |= M_BESTFIT;
507 for (i = 0; i < cnt; i++) {
508 if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
509 VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
510 break;
511 store[i] = (void *)addr;
512 /* Only guarantee one allocation. */
513 flags &= ~M_WAITOK;
514 flags |= M_NOWAIT;
515 }
516 return i;
517}
518
519/*
520 * Release memory from the UMA cache to the arena.
521 */
522static void
523qc_release(void *arg, void **store, int cnt)
524{
525 qcache_t *qc;
526 int i;
527
528 qc = arg;
529 for (i = 0; i < cnt; i++)
530 vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
531}
532
533static void
534qc_init(vmem_t *vm, vmem_size_t qcache_max)
535{
536 qcache_t *qc;
537 vmem_size_t size;
538 int qcache_idx_max;
539 int i;
540
541 MPASS((qcache_max & vm->vm_quantum_mask) == 0);
542 qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
543 VMEM_QCACHE_IDX_MAX);
544 vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
545 for (i = 0; i < qcache_idx_max; i++) {
546 qc = &vm->vm_qcache[i];
547 size = (i + 1) << vm->vm_quantum_shift;
548 snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
549 vm->vm_name, size);
550 qc->qc_vmem = vm;
551 qc->qc_size = size;
552 qc->qc_cache = uma_zcache_create(qc->qc_name, size,
553 NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
554 UMA_ZONE_VM);
555 MPASS(qc->qc_cache);
556 }
557}
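
/*
 * For example, an arena with a 4 KB quantum and a 64 KB qcache_max
 * gets sixteen UMA caches covering allocations of 1..16 quanta; for
 * an arena named "foo" (a hypothetical name) they would be
 * "foo-4096", "foo-8192", ..., "foo-65536".  Requests larger than
 * vm_qcache_max bypass the quantum caches entirely.
 */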
558
559static void
560qc_destroy(vmem_t *vm)
561{
562 int qcache_idx_max;
563 int i;
564
565 qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
566 for (i = 0; i < qcache_idx_max; i++)
567 uma_zdestroy(vm->vm_qcache[i].qc_cache);
568}
569
570static void
571qc_drain(vmem_t *vm)
572{
573 int qcache_idx_max;
574 int i;
575
576 qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
577 for (i = 0; i < qcache_idx_max; i++)
578 zone_drain(vm->vm_qcache[i].qc_cache);
579}
580
581#ifndef UMA_MD_SMALL_ALLOC
582
583static struct mtx_padalign vmem_bt_lock;
584
585/*
586 * vmem_bt_alloc: Allocate a new page of boundary tags.
587 *
588 * On architectures with uma_small_alloc there is no recursion; no address
589 * space need be allocated to allocate boundary tags. For the others, we
590 * must handle recursion. Boundary tags are necessary to allocate new
591 * boundary tags.
592 *
593 * UMA guarantees that enough tags are held in reserve to allocate a new
594 * page of kva. We dip into this reserve by specifying M_USE_RESERVE only
595 * when allocating the page to hold new boundary tags. In this way the
596 * reserve is automatically filled by the allocation that uses the reserve.
597 *
598 * We still have to guarantee that the new tags are allocated atomically since
599 * many threads may try concurrently. The bt_lock provides this guarantee.
600 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
601 * on failure. It's ok to return NULL for a WAITOK allocation as UMA will
602 * loop again after checking to see if we lost the race to allocate.
603 *
604 * There is a small race between vmem_bt_alloc() returning the page and the
605 * zone lock being acquired to add the page to the zone. For WAITOK
606 * allocations we just pause briefly. NOWAIT may experience a transient
607 * failure. To alleviate this we permit a small number of simultaneous
608 * fills to proceed concurrently so NOWAIT is less likely to fail unless
609 * we are really out of KVA.
610 */
611static void *
612vmem_bt_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
613{
614 vmem_addr_t addr;
615
616 *pflag = UMA_SLAB_KMEM;
617
618 /*
619 * Single thread boundary tag allocation so that the address space
620 * and memory are added in one atomic operation.
621 */
622 mtx_lock(&vmem_bt_lock);
623 if (vmem_xalloc(kmem_arena, bytes, 0, 0, 0, VMEM_ADDR_MIN,
624 VMEM_ADDR_MAX, M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT,
625 &addr) == 0) {
626 if (kmem_back(kmem_object, addr, bytes,
627 M_NOWAIT | M_USE_RESERVE) == 0) {
628 mtx_unlock(&vmem_bt_lock);
629 return ((void *)addr);
630 }
631 vmem_xfree(kmem_arena, addr, bytes);
632 mtx_unlock(&vmem_bt_lock);
633 /*
634 * Out of memory, not address space. This may not even be
635 * possible due to M_USE_RESERVE page allocation.
636 */
637 if (wait & M_WAITOK)
638 VM_WAIT;
639 return (NULL);
640 }
641 mtx_unlock(&vmem_bt_lock);
642 /*
643 * We're either out of address space or lost a fill race.
644 */
645 if (wait & M_WAITOK)
646 pause("btalloc", 1);
647
648 return (NULL);
649}
650#endif
651
652void
653vmem_startup(void)
654{
655
656 mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
657 vmem_bt_zone = uma_zcreate("vmem btag",
658 sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
659 UMA_ALIGN_PTR, UMA_ZONE_VM);
660#ifndef UMA_MD_SMALL_ALLOC
661 mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
662 uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
663 /*
664 * Reserve enough tags to allocate new tags. We allow multiple
665 * CPUs to attempt to allocate new tags concurrently to limit
666 * false restarts in UMA.
667 */
668 uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (mp_ncpus + 1) / 2);
669 uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
670#endif
671}
672
673/* ---- rehash */
674
675static int
676vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
677{
678 bt_t *bt;
679 int i;
680 struct vmem_hashlist *newhashlist;
681 struct vmem_hashlist *oldhashlist;
682 vmem_size_t oldhashsize;
683
684 MPASS(newhashsize > 0);
685
686 newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
687 M_VMEM, M_NOWAIT);
688 if (newhashlist == NULL)
689 return ENOMEM;
690 for (i = 0; i < newhashsize; i++) {
691 LIST_INIT(&newhashlist[i]);
692 }
693
694 VMEM_LOCK(vm);
695 oldhashlist = vm->vm_hashlist;
696 oldhashsize = vm->vm_hashsize;
697 vm->vm_hashlist = newhashlist;
698 vm->vm_hashsize = newhashsize;
699 if (oldhashlist == NULL) {
700 VMEM_UNLOCK(vm);
701 return 0;
702 }
703 for (i = 0; i < oldhashsize; i++) {
704 while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
705 bt_rembusy(vm, bt);
706 bt_insbusy(vm, bt);
707 }
708 }
709 VMEM_UNLOCK(vm);
710
711 if (oldhashlist != vm->vm_hash0) {
712 free(oldhashlist, M_VMEM);
713 }
714
715 return 0;
716}
717
718static void
719vmem_periodic_kick(void *dummy)
720{
721
722 taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
723}
724
725static void
726vmem_periodic(void *unused, int pending)
727{
728 vmem_t *vm;
729 vmem_size_t desired;
730 vmem_size_t current;
731
732 mtx_lock(&vmem_list_lock);
733 LIST_FOREACH(vm, &vmem_list, vm_alllist) {
734#ifdef DIAGNOSTIC
735 /* Convenient time to verify vmem state. */
736 if (enable_vmem_check == 1) {
737 VMEM_LOCK(vm);
738 vmem_check(vm);
739 VMEM_UNLOCK(vm);
740 }
741#endif
742 desired = 1 << flsl(vm->vm_nbusytag);
743 desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
744 VMEM_HASHSIZE_MAX);
745 current = vm->vm_hashsize;
746
747 /* Grow in powers of two. Shrink less aggressively. */
748 if (desired >= current * 2 || desired * 4 <= current)
749 vmem_rehash(vm, desired);
750 }
751 mtx_unlock(&vmem_list_lock);
752
753 callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
754 vmem_periodic_kick, NULL);
755}
756
757static void
758vmem_start_callout(void *unused)
759{
760
761 TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
762 vmem_periodic_interval = hz * 10;
763 callout_init(&vmem_periodic_ch, CALLOUT_MPSAFE);
764 callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
765 vmem_periodic_kick, NULL);
766}
767SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);
768
769static void
770vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
771{
772 bt_t *btspan;
773 bt_t *btfree;
774
775 MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
776 MPASS((size & vm->vm_quantum_mask) == 0);
777
778 btspan = bt_alloc(vm);
779 btspan->bt_type = type;
780 btspan->bt_start = addr;
781 btspan->bt_size = size;
782 bt_insseg_tail(vm, btspan);
783
784 btfree = bt_alloc(vm);
785 btfree->bt_type = BT_TYPE_FREE;
786 btfree->bt_start = addr;
787 btfree->bt_size = size;
788 bt_insseg(vm, btfree, btspan);
789 bt_insfree(vm, btfree);
790
791 vm->vm_size += size;
792}
793
794static void
795vmem_destroy1(vmem_t *vm)
796{
797 bt_t *bt;
798
799 /*
800 * Drain per-cpu quantum caches.
801 */
802 qc_destroy(vm);
803
804 /*
805 * The vmem should now only contain empty segments.
806 */
807 VMEM_LOCK(vm);
808 MPASS(vm->vm_nbusytag == 0);
809
810 while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
811 bt_remseg(vm, bt);
812
813 if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
814 free(vm->vm_hashlist, M_VMEM);
815
816 bt_freetrim(vm, 0);
817
818 VMEM_CONDVAR_DESTROY(vm);
819 VMEM_LOCK_DESTROY(vm);
820 free(vm, M_VMEM);
821}
822
823static int
824vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
825{
826 vmem_addr_t addr;
827 int error;
828
829 if (vm->vm_importfn == NULL)
830 return EINVAL;
831
832 /*
833 * To make sure we get a span that meets the alignment we double it
834 * and add the size to the tail. This slightly overestimates.
835 */
836 if (align != vm->vm_quantum_mask + 1)
837 size = (align * 2) + size;
838 size = roundup(size, vm->vm_import_quantum);
839
840 /*
841 * Hide MAXALLOC tags so we're guaranteed to be able to add this
842 * span and the tag we want to allocate from it.
843 */
844 MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
845 vm->vm_nfreetags -= BT_MAXALLOC;
846 VMEM_UNLOCK(vm);
847 error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
848 VMEM_LOCK(vm);
849 vm->vm_nfreetags += BT_MAXALLOC;
850 if (error)
851 return ENOMEM;
852
853 vmem_add1(vm, addr, size, BT_TYPE_SPAN);
854
855 return 0;
856}
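
/*
 * An example of the over-estimate above: a 16 KB request with 64 KB
 * alignment from an arena with a 4 KB quantum and a 128 KB import
 * quantum imports roundup(2 * 64 KB + 16 KB, 128 KB) == 256 KB, which
 * is guaranteed to contain a suitably aligned 16 KB range.
 */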
857
858/*
859 * vmem_fit: check if a bt can satisfy the given restrictions.
860 *
 861 * It is the caller's responsibility to ensure the region is big enough
 862 * before calling us.
863 */
864static int
865vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
866 vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
867 vmem_addr_t maxaddr, vmem_addr_t *addrp)
868{
869 vmem_addr_t start;
870 vmem_addr_t end;
871
872 MPASS(size > 0);
873 MPASS(bt->bt_size >= size); /* caller's responsibility */
874
875 /*
876 * XXX assumption: vmem_addr_t and vmem_size_t are
 877	 * unsigned integers of the same size.
878 */
879
880 start = bt->bt_start;
881 if (start < minaddr) {
882 start = minaddr;
883 }
884 end = BT_END(bt);
885 if (end > maxaddr)
886 end = maxaddr;
887 if (start > end)
888 return (ENOMEM);
889
890 start = VMEM_ALIGNUP(start - phase, align) + phase;
891 if (start < bt->bt_start)
892 start += align;
893 if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
894 MPASS(align < nocross);
895 start = VMEM_ALIGNUP(start - phase, nocross) + phase;
896 }
897 if (start <= end && end - start >= size - 1) {
898 MPASS((start & (align - 1)) == phase);
899 MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
900 MPASS(minaddr <= start);
901 MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
902 MPASS(bt->bt_start <= start);
903 MPASS(BT_END(bt) - start >= size - 1);
904 *addrp = start;
905
906 return (0);
907 }
908 return (ENOMEM);
909}
910
911/*
912 * vmem_clip: Trim the boundary tag edges to the requested start and size.
913 */
914static void
915vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
916{
917 bt_t *btnew;
918 bt_t *btprev;
919
920 VMEM_ASSERT_LOCKED(vm);
921 MPASS(bt->bt_type == BT_TYPE_FREE);
922 MPASS(bt->bt_size >= size);
923 bt_remfree(vm, bt);
924 if (bt->bt_start != start) {
925 btprev = bt_alloc(vm);
926 btprev->bt_type = BT_TYPE_FREE;
927 btprev->bt_start = bt->bt_start;
928 btprev->bt_size = start - bt->bt_start;
929 bt->bt_start = start;
930 bt->bt_size -= btprev->bt_size;
931 bt_insfree(vm, btprev);
932 bt_insseg(vm, btprev,
933 TAILQ_PREV(bt, vmem_seglist, bt_seglist));
934 }
935 MPASS(bt->bt_start == start);
936 if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
937 /* split */
938 btnew = bt_alloc(vm);
939 btnew->bt_type = BT_TYPE_BUSY;
940 btnew->bt_start = bt->bt_start;
941 btnew->bt_size = size;
942 bt->bt_start = bt->bt_start + size;
943 bt->bt_size -= size;
944 bt_insfree(vm, bt);
945 bt_insseg(vm, btnew,
946 TAILQ_PREV(bt, vmem_seglist, bt_seglist));
947 bt_insbusy(vm, btnew);
948 bt = btnew;
949 } else {
950 bt->bt_type = BT_TYPE_BUSY;
951 bt_insbusy(vm, bt);
952 }
953 MPASS(bt->bt_size >= size);
954 bt->bt_type = BT_TYPE_BUSY;
955}
956
957/* ---- vmem API */
958
959void
960vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
961 vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
962{
963
964 VMEM_LOCK(vm);
965 vm->vm_importfn = importfn;
966 vm->vm_releasefn = releasefn;
967 vm->vm_arg = arg;
968 vm->vm_import_quantum = import_quantum;
969 VMEM_UNLOCK(vm);
970}
971
972void
973vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
974{
975
976 VMEM_LOCK(vm);
977 vm->vm_reclaimfn = reclaimfn;
978 VMEM_UNLOCK(vm);
979}
980
981/*
982 * vmem_init: Initializes vmem arena.
983 */
984vmem_t *
985vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
986 vmem_size_t quantum, vmem_size_t qcache_max, int flags)
987{
988 int i;
989
990 MPASS(quantum > 0);
991 MPASS((quantum & (quantum - 1)) == 0);
992
993 bzero(vm, sizeof(*vm));
994
995 VMEM_CONDVAR_INIT(vm, name);
996 VMEM_LOCK_INIT(vm, name);
997 vm->vm_nfreetags = 0;
998 LIST_INIT(&vm->vm_freetags);
999 strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
1000 vm->vm_quantum_mask = quantum - 1;
1001 vm->vm_quantum_shift = flsl(quantum) - 1;
1002 vm->vm_nbusytag = 0;
1003 vm->vm_size = 0;
1004 vm->vm_inuse = 0;
1005 qc_init(vm, qcache_max);
1006
1007 TAILQ_INIT(&vm->vm_seglist);
1008 for (i = 0; i < VMEM_MAXORDER; i++) {
1009 LIST_INIT(&vm->vm_freelist[i]);
1010 }
1011 memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
1012 vm->vm_hashsize = VMEM_HASHSIZE_MIN;
1013 vm->vm_hashlist = vm->vm_hash0;
1014
1015 if (size != 0) {
1016 if (vmem_add(vm, base, size, flags) != 0) {
1017 vmem_destroy1(vm);
1018 return NULL;
1019 }
1020 }
1021
1022 mtx_lock(&vmem_list_lock);
1023 LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
1024 mtx_unlock(&vmem_list_lock);
1025
1026 return vm;
1027}
1028
1029/*
1030 * vmem_create: create an arena.
1031 */
1032vmem_t *
1033vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
1034 vmem_size_t quantum, vmem_size_t qcache_max, int flags)
1035{
1036
1037 vmem_t *vm;
1038
1039 vm = malloc(sizeof(*vm), M_VMEM, flags & (M_WAITOK|M_NOWAIT));
1040 if (vm == NULL)
1041 return (NULL);
1042 if (vmem_init(vm, name, base, size, quantum, qcache_max,
1043 flags) == NULL) {
1044 free(vm, M_VMEM);
1045 return (NULL);
1046 }
1047 return (vm);
1048}
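
/*
 * Example usage (a hypothetical consumer; the name and sizes are
 * illustrative only):
 *
 *	vmem_t *foo_arena;
 *	vmem_addr_t addr;
 *
 *	foo_arena = vmem_create("foo", PAGE_SIZE, 1024 * 1024,
 *	    PAGE_SIZE, 0, M_WAITOK);
 *	if (vmem_alloc(foo_arena, 3 * PAGE_SIZE,
 *	    M_BESTFIT | M_WAITOK, &addr) == 0)
 *		vmem_free(foo_arena, addr, 3 * PAGE_SIZE);
 *	vmem_destroy(foo_arena);
 */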
1049
1050void
1051vmem_destroy(vmem_t *vm)
1052{
1053
1054 mtx_lock(&vmem_list_lock);
1055 LIST_REMOVE(vm, vm_alllist);
1056 mtx_unlock(&vmem_list_lock);
1057
1058 vmem_destroy1(vm);
1059}
1060
1061vmem_size_t
1062vmem_roundup_size(vmem_t *vm, vmem_size_t size)
1063{
1064
1065 return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
1066}
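
/*
 * For example, with a 4 KB quantum vmem_roundup_size() turns a
 * 5000 byte request into 8192 bytes.
 */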
1067
1068/*
1069 * vmem_alloc: allocate resource from the arena.
1070 */
1071int
1072vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
1073{
1074 const int strat __unused = flags & VMEM_FITMASK;
1075 qcache_t *qc;
1076
1077 flags &= VMEM_FLAGS;
1078 MPASS(size > 0);
1079 MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
1080 if ((flags & M_NOWAIT) == 0)
1081 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");
1082
1083 if (size <= vm->vm_qcache_max) {
1084 qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1085 *addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags);
1086 if (*addrp == 0)
1087 return (ENOMEM);
1088 return (0);
1089 }
1090
1091 return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
1092 flags, addrp);
1093}
1094
1095int
1096vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
1097 const vmem_size_t phase, const vmem_size_t nocross,
1098 const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
1099 vmem_addr_t *addrp)
1100{
1101 const vmem_size_t size = vmem_roundup_size(vm, size0);
1102 struct vmem_freelist *list;
1103 struct vmem_freelist *first;
1104 struct vmem_freelist *end;
1105 vmem_size_t avail;
1106 bt_t *bt;
1107 int error;
1108 int strat;
1109
1110 flags &= VMEM_FLAGS;
1111 strat = flags & VMEM_FITMASK;
1112 MPASS(size0 > 0);
1113 MPASS(size > 0);
1114 MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
1115 MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
1116 if ((flags & M_NOWAIT) == 0)
1117 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
1118 MPASS((align & vm->vm_quantum_mask) == 0);
1119 MPASS((align & (align - 1)) == 0);
1120 MPASS((phase & vm->vm_quantum_mask) == 0);
1121 MPASS((nocross & vm->vm_quantum_mask) == 0);
1122 MPASS((nocross & (nocross - 1)) == 0);
1123 MPASS((align == 0 && phase == 0) || phase < align);
1124 MPASS(nocross == 0 || nocross >= size);
1125 MPASS(minaddr <= maxaddr);
1126 MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
1127
1128 if (align == 0)
1129 align = vm->vm_quantum_mask + 1;
1130
1131 *addrp = 0;
1132 end = &vm->vm_freelist[VMEM_MAXORDER];
1133 /*
1134 * choose a free block from which we allocate.
1135 */
1136 first = bt_freehead_toalloc(vm, size, strat);
1137 VMEM_LOCK(vm);
1138 for (;;) {
1139 /*
1140 * Make sure we have enough tags to complete the
1141 * operation.
1142 */
1143 if (vm->vm_nfreetags < BT_MAXALLOC &&
1144 bt_fill(vm, flags) != 0) {
1145 error = ENOMEM;
1146 break;
1147 }
1148 /*
1149 * Scan freelists looking for a tag that satisfies the
1150 * allocation. If we're doing BESTFIT we may encounter
1151 * sizes below the request. If we're doing FIRSTFIT we
1152 * inspect only the first element from each list.
1153 */
1154 for (list = first; list < end; list++) {
1155 LIST_FOREACH(bt, list, bt_freelist) {
1156 if (bt->bt_size >= size) {
1157 error = vmem_fit(bt, size, align, phase,
1158 nocross, minaddr, maxaddr, addrp);
1159 if (error == 0) {
1160 vmem_clip(vm, bt, *addrp, size);
1161 goto out;
1162 }
1163 }
1164 /* FIRST skips to the next list. */
1165 if (strat == M_FIRSTFIT)
1166 break;
1167 }
1168 }
1169 /*
1170 * Retry if the fast algorithm failed.
1171 */
1172 if (strat == M_FIRSTFIT) {
1173 strat = M_BESTFIT;
1174 first = bt_freehead_toalloc(vm, size, strat);
1175 continue;
1176 }
1177 /*
1178 * XXX it is possible to fail to meet restrictions with the
1179 * imported region. It is up to the user to specify the
1180 * import quantum such that it can satisfy any allocation.
1181 */
1182 if (vmem_import(vm, size, align, flags) == 0)
1183 continue;
1184
1185 /*
1186 * Try to free some space from the quantum cache or reclaim
1187 * functions if available.
1188 */
1189 if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
1190 avail = vm->vm_size - vm->vm_inuse;
1191 VMEM_UNLOCK(vm);
1192 if (vm->vm_qcache_max != 0)
1193 qc_drain(vm);
1194 if (vm->vm_reclaimfn != NULL)
1195 vm->vm_reclaimfn(vm, flags);
1196 VMEM_LOCK(vm);
1197 /* If we were successful retry even NOWAIT. */
1198 if (vm->vm_size - vm->vm_inuse > avail)
1199 continue;
1200 }
1201 if ((flags & M_NOWAIT) != 0) {
1202 error = ENOMEM;
1203 break;
1204 }
1205 VMEM_CONDVAR_WAIT(vm);
1206 }
1207out:
1208 VMEM_UNLOCK(vm);
1209 if (error != 0 && (flags & M_NOWAIT) == 0)
1210 panic("failed to allocate waiting allocation\n");
1211
1212 return (error);
1213}
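
/*
 * An example of a constrained request (illustrative values, assuming
 * the arena quantum divides the alignment): 64 KB, aligned to 16 KB,
 * not crossing a 1 MB boundary, anywhere in the arena:
 *
 *	error = vmem_xalloc(vm, 64 * 1024, 16 * 1024, 0, 1024 * 1024,
 *	    VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_BESTFIT | M_WAITOK, &addr);
 */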
1214
1215/*
1216 * vmem_free: free the resource to the arena.
1217 */
1218void
1219vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1220{
1221 qcache_t *qc;
1222 MPASS(size > 0);
1223
1224 if (size <= vm->vm_qcache_max) {
1225 qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1226 uma_zfree(qc->qc_cache, (void *)addr);
1227 } else
1228 vmem_xfree(vm, addr, size);
1229}
1230
1231void
1232vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1233{
1234 bt_t *bt;
1235 bt_t *t;
1236
1237 MPASS(size > 0);
1238
1239 VMEM_LOCK(vm);
1240 bt = bt_lookupbusy(vm, addr);
1241 MPASS(bt != NULL);
1242 MPASS(bt->bt_start == addr);
1243 MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
1244 bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
1245 MPASS(bt->bt_type == BT_TYPE_BUSY);
1246 bt_rembusy(vm, bt);
1247 bt->bt_type = BT_TYPE_FREE;
1248
1249 /* coalesce */
1250 t = TAILQ_NEXT(bt, bt_seglist);
1251 if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1252 MPASS(BT_END(bt) < t->bt_start); /* YYY */
1253 bt->bt_size += t->bt_size;
1254 bt_remfree(vm, t);
1255 bt_remseg(vm, t);
1256 }
1257 t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
1258 if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1259 MPASS(BT_END(t) < bt->bt_start); /* YYY */
1260 bt->bt_size += t->bt_size;
1261 bt->bt_start = t->bt_start;
1262 bt_remfree(vm, t);
1263 bt_remseg(vm, t);
1264 }
1265
1266 t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
1267 MPASS(t != NULL);
1268 MPASS(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
1269 if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
1270 t->bt_size == bt->bt_size) {
1271 vmem_addr_t spanaddr;
1272 vmem_size_t spansize;
1273
1274 MPASS(t->bt_start == bt->bt_start);
1275 spanaddr = bt->bt_start;
1276 spansize = bt->bt_size;
1277 bt_remseg(vm, bt);
1278 bt_remseg(vm, t);
1279 vm->vm_size -= spansize;
1280 VMEM_CONDVAR_BROADCAST(vm);
1281 bt_freetrim(vm, BT_MAXFREE);
1282 (*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
1283 } else {
1284 bt_insfree(vm, bt);
1285 VMEM_CONDVAR_BROADCAST(vm);
1286 bt_freetrim(vm, BT_MAXFREE);
1287 }
1288}
1289
1290/*
 1291 * vmem_add: add a span of resource to the arena; unlike an imported
 1292 * span, it is never handed back to the release function.
1293 */
1294int
1295vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
1296{
1297 int error;
1298
1299 error = 0;
1300 flags &= VMEM_FLAGS;
1301 VMEM_LOCK(vm);
1302 if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
1303 vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
1304 else
1305 error = ENOMEM;
1306 VMEM_UNLOCK(vm);
1307
1308 return (error);
1309}
1310
1311/*
 1312 * vmem_size: report the arena's total, free, or allocated size.
1313 */
1314vmem_size_t
1315vmem_size(vmem_t *vm, int typemask)
1316{
1317
1318 switch (typemask) {
1319 case VMEM_ALLOC:
1320 return vm->vm_inuse;
1321 case VMEM_FREE:
1322 return vm->vm_size - vm->vm_inuse;
1323 case VMEM_FREE|VMEM_ALLOC:
1324 return vm->vm_size;
1325 default:
1326 panic("vmem_size");
1327 }
1328}
1329
1330/* ---- debug */
1331
1332#if defined(DDB) || defined(DIAGNOSTIC)
1333
1334static void bt_dump(const bt_t *, int (*)(const char *, ...)
1335 __printflike(1, 2));
1336
1337static const char *
1338bt_type_string(int type)
1339{
1340
1341 switch (type) {
1342 case BT_TYPE_BUSY:
1343 return "busy";
1344 case BT_TYPE_FREE:
1345 return "free";
1346 case BT_TYPE_SPAN:
1347 return "span";
1348 case BT_TYPE_SPAN_STATIC:
1349 return "static span";
1350 default:
1351 break;
1352 }
1353 return "BOGUS";
1354}
1355
1356static void
1357bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
1358{
1359
1360 (*pr)("\t%p: %jx %jx, %d(%s)\n",
1361 bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
1362 bt->bt_type, bt_type_string(bt->bt_type));
1363}
1364
1365static void
1366vmem_dump(const vmem_t *vm , int (*pr)(const char *, ...) __printflike(1, 2))
1367{
1368 const bt_t *bt;
1369 int i;
1370
1371 (*pr)("vmem %p '%s'\n", vm, vm->vm_name);
1372 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1373 bt_dump(bt, pr);
1374 }
1375
1376 for (i = 0; i < VMEM_MAXORDER; i++) {
1377 const struct vmem_freelist *fl = &vm->vm_freelist[i];
1378
1379 if (LIST_EMPTY(fl)) {
1380 continue;
1381 }
1382
1383 (*pr)("freelist[%d]\n", i);
1384 LIST_FOREACH(bt, fl, bt_freelist) {
1385 bt_dump(bt, pr);
1386 }
1387 }
1388}
1389
1390#endif /* defined(DDB) || defined(DIAGNOSTIC) */
1391
1392#if defined(DDB)
1393#include <ddb/ddb.h>
1394
1395static bt_t *
1396vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
1397{
1398 bt_t *bt;
1399
1400 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1401 if (BT_ISSPAN_P(bt)) {
1402 continue;
1403 }
1404 if (bt->bt_start <= addr && addr <= BT_END(bt)) {
1405 return bt;
1406 }
1407 }
1408
1409 return NULL;
1410}
1411
1412void
1413vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
1414{
1415 vmem_t *vm;
1416
1417 LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1418 bt_t *bt;
1419
1420 bt = vmem_whatis_lookup(vm, addr);
1421 if (bt == NULL) {
1422 continue;
1423 }
1424 (*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
1425 (void *)addr, (void *)bt->bt_start,
1426 (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
1427 (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
1428 }
1429}
1430
1431void
1432vmem_printall(const char *modif, int (*pr)(const char *, ...))
1433{
1434 const vmem_t *vm;
1435
1436 LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1437 vmem_dump(vm, pr);
1438 }
1439}
1440
1441void
1442vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
1443{
1444 const vmem_t *vm = (const void *)addr;
1445
1446 vmem_dump(vm, pr);
1447}
1448
1449DB_SHOW_COMMAND(vmemdump, vmemdump)
1450{
1451
1452 if (!have_addr) {
1453 db_printf("usage: show vmemdump <addr>\n");
1454 return;
1455 }
1456
1457 vmem_dump((const vmem_t *)addr, db_printf);
1458}
1459
1460DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall)
1461{
1462 const vmem_t *vm;
1463
1464 LIST_FOREACH(vm, &vmem_list, vm_alllist)
1465 vmem_dump(vm, db_printf);
1466}
1467
1468DB_SHOW_COMMAND(vmem, vmem_summ)
1469{
1470 const vmem_t *vm = (const void *)addr;
1471 const bt_t *bt;
1472 size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER];
1473 size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER];
1474 int ord;
1475
1476 if (!have_addr) {
1477 db_printf("usage: show vmem <addr>\n");
1478 return;
1479 }
1480
1481 db_printf("vmem %p '%s'\n", vm, vm->vm_name);
1482 db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
1483 db_printf("\tsize:\t%zu\n", vm->vm_size);
1484 db_printf("\tinuse:\t%zu\n", vm->vm_inuse);
1485 db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
1486 db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag);
1487 db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags);
1488
1489 memset(&ft, 0, sizeof(ft));
1490 memset(&ut, 0, sizeof(ut));
1491 memset(&fs, 0, sizeof(fs));
1492 memset(&us, 0, sizeof(us));
1493 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1494 ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift);
1495 if (bt->bt_type == BT_TYPE_BUSY) {
1496 ut[ord]++;
1497 us[ord] += bt->bt_size;
1498 } else if (bt->bt_type == BT_TYPE_FREE) {
1499 ft[ord]++;
1500 fs[ord] += bt->bt_size;
1501 }
1502 }
1503 db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n");
1504 for (ord = 0; ord < VMEM_MAXORDER; ord++) {
1505 if (ut[ord] == 0 && ft[ord] == 0)
1506 continue;
1507 db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n",
1508 ORDER2SIZE(ord) << vm->vm_quantum_shift,
1509 ut[ord], us[ord], ft[ord], fs[ord]);
1510 }
1511}
1512
1513DB_SHOW_ALL_COMMAND(vmem, vmem_summall)
1514{
1515 const vmem_t *vm;
1516
1517 LIST_FOREACH(vm, &vmem_list, vm_alllist)
1518 vmem_summ((db_expr_t)vm, TRUE, count, modif);
1519}
1520#endif /* defined(DDB) */
1521
1522#define vmem_printf printf
1523
1524#if defined(DIAGNOSTIC)
1525
1526static bool
1527vmem_check_sanity(vmem_t *vm)
1528{
1529 const bt_t *bt, *bt2;
1530
1531 MPASS(vm != NULL);
1532
1533 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1534 if (bt->bt_start > BT_END(bt)) {
1535 printf("corrupted tag\n");
1536 bt_dump(bt, vmem_printf);
1537 return false;
1538 }
1539 }
1540 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1541 TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
1542 if (bt == bt2) {
1543 continue;
1544 }
1545 if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
1546 continue;
1547 }
1548 if (bt->bt_start <= BT_END(bt2) &&
1549 bt2->bt_start <= BT_END(bt)) {
1550 printf("overwrapped tags\n");
1551 bt_dump(bt, vmem_printf);
1552 bt_dump(bt2, vmem_printf);
1553 return false;
1554 }
1555 }
1556 }
1557
1558 return true;
1559}
1560
1561static void
1562vmem_check(vmem_t *vm)
1563{
1564
1565 if (!vmem_check_sanity(vm)) {
1566 panic("insanity vmem %p", vm);
1567 }
1568}
1569
1570#endif /* defined(DIAGNOSTIC) */